1 /* Array prefetching.
2 Copyright (C) 2005 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
30 #include "output.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "timevar.h"
35 #include "cfgloop.h"
36 #include "varray.h"
37 #include "expr.h"
38 #include "tree-pass.h"
39 #include "ggc.h"
40 #include "insn-config.h"
41 #include "recog.h"
42 #include "hashtab.h"
43 #include "tree-chrec.h"
44 #include "tree-scalar-evolution.h"
45 #include "toplev.h"
46 #include "params.h"
47 #include "langhooks.h"
48 #include "tree-inline.h"
49 #include "tree-data-ref.h"
50
51 /* This pass inserts prefetch instructions to optimize cache usage during
52 accesses to arrays in loops. It processes loops sequentially and:
53
54 1) Gathers all memory references in the single loop.
55 2) For each of the references it decides when it is profitable to prefetch
 56 it. To do this, we evaluate the reuse among the accesses and determine
57 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
58 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
59 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
60 iterations of the loop that are zero modulo PREFETCH_MOD). For example
61 (assuming cache line size is 64 bytes, char has size 1 byte and there
62 is no hardware sequential prefetch):
63
64 char *a;
65 for (i = 0; i < max; i++)
66 {
67 a[255] = ...; (0)
68 a[i] = ...; (1)
69 a[i + 64] = ...; (2)
70 a[16*i] = ...; (3)
71 a[187*i] = ...; (4)
72 a[187*i + 50] = ...; (5)
73 }
74
75 (0) obviously has PREFETCH_BEFORE 1
76 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
77 location 64 iterations before it, and PREFETCH_MOD 64 (since
78 it hits the same cache line otherwise).
79 (2) has PREFETCH_MOD 64
80 (3) has PREFETCH_MOD 4
81 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
82 the cache line accessed by (4) is the same with probability only
83 7/32.
84 (5) has PREFETCH_MOD 1 as well.
85
86 Additionally, we use data dependence analysis to determine for each
 87 reference the distance until the first reuse; this information is used
88 to determine the temporality of the issued prefetch instruction.
89
 90 3) We determine how far ahead we need to prefetch. The number of
91 iterations needed is time to fetch / time spent in one iteration of
92 the loop. The problem is that we do not know either of these values,
93 so we just make a heuristic guess based on a magic (possibly)
94 target-specific constant and size of the loop.
95
96 4) Determine which of the references we prefetch. We take into account
97 that there is a maximum number of simultaneous prefetches (provided
 98 by the machine description). We issue as many prefetches as possible
99 while still within this bound (starting with those with lowest
100 prefetch_mod, since they are responsible for most of the cache
101 misses).
102
103 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
104 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
105 prefetching nonaccessed memory.
106 TODO -- actually implement peeling.
107
108 6) We actually emit the prefetch instructions. ??? Perhaps emit the
109 prefetch instructions with guards in cases where 5) was not sufficient
110 to satisfy the constraints?
111
112 Some other TODO:
 113 -- write and use more general reuse analysis (that could also be used
 114 in other cache-aimed loop optimizations)
 115 -- make it behave sanely together with the prefetches given by the user
 116 (now we just ignore them; at the very least we should avoid
 117 optimizing loops in which the user put his own prefetches)
118 -- we assume cache line size alignment of arrays; this could be
119 improved. */
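
/* As a rough sketch of the intended transformation (illustration only; the
   actual numbers are target-dependent), assume 8-byte array elements, a
   64-byte PREFETCH_BLOCK and AHEAD == 8 for the store loop

     for (i = 0; i < n; i++)
       a[i] = ...;

   Then PREFETCH_MOD == 64 / 8 == 8, the loop is unrolled 8 times (assuming
   the unroll bound allows it), and one prefetch is issued per unrolled
   body, one cache line ahead of the stores:

     for (i = 0; i < n; i += 8)
       {
         __builtin_prefetch (&a[i + 8], 1, 3);
         a[i] = ...; a[i + 1] = ...; ...; a[i + 7] = ...;
       }
*/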
120
121 /* Magic constants follow. These should be replaced by machine specific
122 numbers. */
123
124 /* True if write can be prefetched by a read prefetch. */
125
126 #ifndef WRITE_CAN_USE_READ_PREFETCH
127 #define WRITE_CAN_USE_READ_PREFETCH 1
128 #endif
129
130 /* True if read can be prefetched by a write prefetch. */
131
132 #ifndef READ_CAN_USE_WRITE_PREFETCH
133 #define READ_CAN_USE_WRITE_PREFETCH 0
134 #endif
135
136 /* The size of the block loaded by a single prefetch. Usually, this is
137 the same as cache line size (at the moment, we only consider one level
138 of cache hierarchy). */
139
140 #ifndef PREFETCH_BLOCK
141 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
142 #endif
143
 144 /* Do we have forward hardware sequential prefetching? */
145
146 #ifndef HAVE_FORWARD_PREFETCH
147 #define HAVE_FORWARD_PREFETCH 0
148 #endif
149
 150 /* Do we have backward hardware sequential prefetching? */
151
152 #ifndef HAVE_BACKWARD_PREFETCH
153 #define HAVE_BACKWARD_PREFETCH 0
154 #endif
155
156 /* In some cases we are only able to determine that there is a certain
157 probability that the two accesses hit the same cache line. In this
158 case, we issue the prefetches for both of them if this probability
 159 is less than (1000 - ACCEPTABLE_MISS_RATE) per mille. */
160
161 #ifndef ACCEPTABLE_MISS_RATE
162 #define ACCEPTABLE_MISS_RATE 50
163 #endif
164
165 #ifndef HAVE_prefetch
166 #define HAVE_prefetch 0
167 #endif
168
169 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * L1_CACHE_LINE_SIZE))
170 /* TODO: Add parameter to specify L2 cache size. */
171 #define L2_CACHE_SIZE_BYTES (8 * L1_CACHE_SIZE_BYTES)
172
173 /* We consider a memory access nontemporal if it is not reused sooner than
174 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
175 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
 176 so that we use nontemporal prefetches e.g. if a single memory location
177 is accessed several times in a single iteration of the loop. */
178 #define NONTEMPORAL_FRACTION 16
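
/* As a numeric illustration with a hypothetical cache geometry of 512 lines
   of 64 bytes each: L1_CACHE_SIZE_BYTES is 32768, L2_CACHE_SIZE_BYTES is
   262144, and reuses closer than 32768 / 16 == 2048 bytes are ignored when
   classifying a prefetch as nontemporal. */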
179
 180 /* The group of references between which reuse may occur. */
181
182 struct mem_ref_group
183 {
184 tree base; /* Base of the reference. */
185 HOST_WIDE_INT step; /* Step of the reference. */
186 struct mem_ref *refs; /* References in the group. */
187 struct mem_ref_group *next; /* Next group of references. */
188 };
189
190 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
191
192 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
193
194 /* The memory reference. */
195
196 struct mem_ref
197 {
 198 tree stmt; /* Statement in which the reference appears. */
199 tree mem; /* The reference. */
200 HOST_WIDE_INT delta; /* Constant offset of the reference. */
201 bool write_p; /* Is it a write? */
202 struct mem_ref_group *group; /* The group of references it belongs to. */
203 unsigned HOST_WIDE_INT prefetch_mod;
204 /* Prefetch only each PREFETCH_MOD-th
205 iteration. */
206 unsigned HOST_WIDE_INT prefetch_before;
207 /* Prefetch only first PREFETCH_BEFORE
208 iterations. */
209 unsigned reuse_distance; /* The amount of data accessed before the first
210 reuse of this value. */
211 bool issue_prefetch_p; /* Should we really issue the prefetch? */
212 struct mem_ref *next; /* The next reference in the group. */
213 };
214
215 /* Dumps information about reference REF to FILE. */
216
217 static void
218 dump_mem_ref (FILE *file, struct mem_ref *ref)
219 {
220 fprintf (file, "Reference %p:\n", (void *) ref);
221
222 fprintf (file, " group %p (base ", (void *) ref->group);
223 print_generic_expr (file, ref->group->base, TDF_SLIM);
224 fprintf (file, ", step ");
225 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
226 fprintf (file, ")\n");
227
228 fprintf (file, " delta ");
229 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
230 fprintf (file, "\n");
231
232 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
233
234 fprintf (file, "\n");
235 }
236
237 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
238 exist. */
239
240 static struct mem_ref_group *
241 find_or_create_group (struct mem_ref_group **groups, tree base,
242 HOST_WIDE_INT step)
243 {
244 struct mem_ref_group *group;
245
246 for (; *groups; groups = &(*groups)->next)
247 {
248 if ((*groups)->step == step
249 && operand_equal_p ((*groups)->base, base, 0))
250 return *groups;
251
252 /* Keep the list of groups sorted by decreasing step. */
253 if ((*groups)->step < step)
254 break;
255 }
256
257 group = XNEW (struct mem_ref_group);
258 group->base = base;
259 group->step = step;
260 group->refs = NULL;
261 group->next = *groups;
262 *groups = group;
263
264 return group;
265 }
266
267 /* Records a memory reference MEM in GROUP with offset DELTA and write status
268 WRITE_P. The reference occurs in statement STMT. */
269
270 static void
271 record_ref (struct mem_ref_group *group, tree stmt, tree mem,
272 HOST_WIDE_INT delta, bool write_p)
273 {
274 struct mem_ref **aref;
275
276 /* Do not record the same address twice. */
277 for (aref = &group->refs; *aref; aref = &(*aref)->next)
278 {
 279 /* It need not be possible for a write reference to reuse a read
 280 prefetch, or vice versa. */
281 if (!WRITE_CAN_USE_READ_PREFETCH
282 && write_p
283 && !(*aref)->write_p)
284 continue;
285 if (!READ_CAN_USE_WRITE_PREFETCH
286 && !write_p
287 && (*aref)->write_p)
288 continue;
289
290 if ((*aref)->delta == delta)
291 return;
292 }
293
294 (*aref) = XNEW (struct mem_ref);
295 (*aref)->stmt = stmt;
296 (*aref)->mem = mem;
297 (*aref)->delta = delta;
298 (*aref)->write_p = write_p;
299 (*aref)->prefetch_before = PREFETCH_ALL;
300 (*aref)->prefetch_mod = 1;
301 (*aref)->reuse_distance = 0;
302 (*aref)->issue_prefetch_p = false;
303 (*aref)->group = group;
304 (*aref)->next = NULL;
305
306 if (dump_file && (dump_flags & TDF_DETAILS))
307 dump_mem_ref (dump_file, *aref);
308 }
309
310 /* Release memory references in GROUPS. */
311
312 static void
313 release_mem_refs (struct mem_ref_group *groups)
314 {
315 struct mem_ref_group *next_g;
316 struct mem_ref *ref, *next_r;
317
318 for (; groups; groups = next_g)
319 {
320 next_g = groups->next;
321 for (ref = groups->refs; ref; ref = next_r)
322 {
323 next_r = ref->next;
324 free (ref);
325 }
326 free (groups);
327 }
328 }
329
330 /* A structure used to pass arguments to idx_analyze_ref. */
331
332 struct ar_data
333 {
334 struct loop *loop; /* Loop of the reference. */
335 tree stmt; /* Statement of the reference. */
336 HOST_WIDE_INT *step; /* Step of the memory reference. */
337 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
338 };
339
340 /* Analyzes a single INDEX of a memory reference to obtain information
 341 described in analyze_ref. Callback for for_each_index. */
342
343 static bool
344 idx_analyze_ref (tree base, tree *index, void *data)
345 {
346 struct ar_data *ar_data = (struct ar_data *) data;
347 tree ibase, step, stepsize;
348 HOST_WIDE_INT istep, idelta = 0, imult = 1;
349 affine_iv iv;
350
351 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
352 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
353 return false;
354
355 if (!simple_iv (ar_data->loop, ar_data->stmt, *index, &iv, false))
356 return false;
357 ibase = iv.base;
358 step = iv.step;
359
360 if (!cst_and_fits_in_hwi (step))
361 return false;
362 istep = int_cst_value (step);
363
364 if (TREE_CODE (ibase) == PLUS_EXPR
365 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
366 {
367 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
368 ibase = TREE_OPERAND (ibase, 0);
369 }
370 if (cst_and_fits_in_hwi (ibase))
371 {
372 idelta += int_cst_value (ibase);
373 ibase = build_int_cst (TREE_TYPE (ibase), 0);
374 }
375
376 if (TREE_CODE (base) == ARRAY_REF)
377 {
378 stepsize = array_ref_element_size (base);
379 if (!cst_and_fits_in_hwi (stepsize))
380 return false;
381 imult = int_cst_value (stepsize);
382
383 istep *= imult;
384 idelta *= imult;
385 }
386
387 *ar_data->step += istep;
388 *ar_data->delta += idelta;
389 *index = ibase;
390
391 return true;
392 }
393
 394 /* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where DELTA and
 395 STEP are integer constants and iter is the number of iterations of LOOP. The
396 reference occurs in statement STMT. Strips nonaddressable component
397 references from REF_P. */
398
399 static bool
400 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
401 HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
402 tree stmt)
403 {
404 struct ar_data ar_data;
405 tree off;
406 HOST_WIDE_INT bit_offset;
407 tree ref = *ref_p;
408
409 *step = 0;
410 *delta = 0;
411
412 /* First strip off the component references. Ignore bitfields. */
413 if (TREE_CODE (ref) == COMPONENT_REF
414 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
415 ref = TREE_OPERAND (ref, 0);
416
417 *ref_p = ref;
418
419 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
420 {
421 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
422 bit_offset = TREE_INT_CST_LOW (off);
423 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
424
425 *delta += bit_offset / BITS_PER_UNIT;
426 }
427
428 *base = unshare_expr (ref);
429 ar_data.loop = loop;
430 ar_data.stmt = stmt;
431 ar_data.step = step;
432 ar_data.delta = delta;
433 return for_each_index (base, idx_analyze_ref, &ar_data);
434 }
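
/* As an illustrative example (not compiler output): assuming a global
   "int a[N]" with 4-byte elements and loop counter i, the reference
   a[4 * i + 3] is decomposed here roughly as BASE == a[0], STEP == 16 and
   DELTA == 12, i.e. its address is &BASE + 16 * iter + 12. */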
435
436 /* Record a memory reference REF to the list REFS. The reference occurs in
 437 LOOP in statement STMT and it is a write if WRITE_P is true. */
438
439 static void
440 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
441 tree ref, bool write_p, tree stmt)
442 {
443 tree base;
444 HOST_WIDE_INT step, delta;
445 struct mem_ref_group *agrp;
446
447 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
448 return;
449
450 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
451 are integer constants. */
452 agrp = find_or_create_group (refs, base, step);
453 record_ref (agrp, stmt, ref, delta, write_p);
454 }
455
456 /* Record the suitable memory references in LOOP. */
457
458 static struct mem_ref_group *
459 gather_memory_references (struct loop *loop)
460 {
461 basic_block *body = get_loop_body_in_dom_order (loop);
462 basic_block bb;
463 unsigned i;
464 block_stmt_iterator bsi;
465 tree stmt, lhs, rhs;
466 struct mem_ref_group *refs = NULL;
467
 468 /* Scan the loop body in order, so that earlier references precede the
 469 later ones. */
470 for (i = 0; i < loop->num_nodes; i++)
471 {
472 bb = body[i];
473 if (bb->loop_father != loop)
474 continue;
475
476 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
477 {
478 stmt = bsi_stmt (bsi);
479 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
480 continue;
481
482 lhs = GIMPLE_STMT_OPERAND (stmt, 0);
483 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
484
485 if (REFERENCE_CLASS_P (rhs))
486 gather_memory_references_ref (loop, &refs, rhs, false, stmt);
487 if (REFERENCE_CLASS_P (lhs))
488 gather_memory_references_ref (loop, &refs, lhs, true, stmt);
489 }
490 }
491 free (body);
492
493 return refs;
494 }
495
496 /* Prune the prefetch candidate REF using the self-reuse. */
497
498 static void
499 prune_ref_by_self_reuse (struct mem_ref *ref)
500 {
501 HOST_WIDE_INT step = ref->group->step;
502 bool backward = step < 0;
503
504 if (step == 0)
505 {
 506 /* Prefetch references to an invariant address just once. */
507 ref->prefetch_before = 1;
508 return;
509 }
510
511 if (backward)
512 step = -step;
513
514 if (step > PREFETCH_BLOCK)
515 return;
516
517 if ((backward && HAVE_BACKWARD_PREFETCH)
518 || (!backward && HAVE_FORWARD_PREFETCH))
519 {
520 ref->prefetch_before = 1;
521 return;
522 }
523
524 ref->prefetch_mod = PREFETCH_BLOCK / step;
525 }
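
/* For example (illustration only), with PREFETCH_BLOCK == 64 a reference
   with step 16 gets PREFETCH_MOD == 4 -- matching access (3) in the
   overview above -- while a reference with step 128 is left unrestricted
   here, since consecutive iterations never touch the same cache line. */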
526
527 /* Divides X by BY, rounding down. */
528
529 static HOST_WIDE_INT
530 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
531 {
532 gcc_assert (by > 0);
533
534 if (x >= 0)
535 return x / by;
536 else
537 return (x + by - 1) / by;
538 }
539
540 /* Prune the prefetch candidate REF using the reuse with BY.
541 If BY_IS_BEFORE is true, BY is before REF in the loop. */
542
543 static void
544 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
545 bool by_is_before)
546 {
547 HOST_WIDE_INT step = ref->group->step;
548 bool backward = step < 0;
549 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
550 HOST_WIDE_INT delta = delta_b - delta_r;
551 HOST_WIDE_INT hit_from;
552 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
553
554 if (delta == 0)
555 {
 556 /* If the references have the same address, only prefetch the
 557 first one. */
558 if (by_is_before)
559 ref->prefetch_before = 0;
560
561 return;
562 }
563
564 if (!step)
565 {
566 /* If the reference addresses are invariant and fall into the
567 same cache line, prefetch just the first one. */
568 if (!by_is_before)
569 return;
570
571 if (ddown (ref->delta, PREFETCH_BLOCK)
572 != ddown (by->delta, PREFETCH_BLOCK))
573 return;
574
575 ref->prefetch_before = 0;
576 return;
577 }
578
579 /* Only prune the reference that is behind in the array. */
580 if (backward)
581 {
582 if (delta > 0)
583 return;
584
585 /* Transform the data so that we may assume that the accesses
586 are forward. */
587 delta = - delta;
588 step = -step;
589 delta_r = PREFETCH_BLOCK - 1 - delta_r;
590 delta_b = PREFETCH_BLOCK - 1 - delta_b;
591 }
592 else
593 {
594 if (delta < 0)
595 return;
596 }
597
598 /* Check whether the two references are likely to hit the same cache
 599 line, and how far apart the iterations in which this occurs are from
600 each other. */
601
602 if (step <= PREFETCH_BLOCK)
603 {
604 /* The accesses are sure to meet. Let us check when. */
605 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
606 prefetch_before = (hit_from - delta_r + step - 1) / step;
607
608 if (prefetch_before < ref->prefetch_before)
609 ref->prefetch_before = prefetch_before;
610
611 return;
612 }
613
 614 /* A more complicated case. First let us ensure that the cache line size
 615 and step are coprime (here we assume that PREFETCH_BLOCK is a power
 616 of two). */
617 prefetch_block = PREFETCH_BLOCK;
618 while ((step & 1) == 0
619 && prefetch_block > 1)
620 {
621 step >>= 1;
622 prefetch_block >>= 1;
623 delta >>= 1;
624 }
625
626 /* Now step > prefetch_block, and step and prefetch_block are coprime.
627 Determine the probability that the accesses hit the same cache line. */
628
629 prefetch_before = delta / step;
630 delta %= step;
631 if ((unsigned HOST_WIDE_INT) delta
632 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
633 {
634 if (prefetch_before < ref->prefetch_before)
635 ref->prefetch_before = prefetch_before;
636
637 return;
638 }
639
640 /* Try also the following iteration. */
641 prefetch_before++;
642 delta = step - delta;
643 if ((unsigned HOST_WIDE_INT) delta
644 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
645 {
646 if (prefetch_before < ref->prefetch_before)
647 ref->prefetch_before = prefetch_before;
648
649 return;
650 }
651
 652 /* REF probably does not reuse BY. */
653 return;
654 }
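
/* Worked example (illustration only): for the accesses a[i] and a[i + 64]
   from the overview (step 1, PREFETCH_BLOCK 64), delta is 64 and step <=
   PREFETCH_BLOCK, so hit_from == 64 and prefetch_before == 64; after the
   first 64 iterations the location accessed by a[i] has already been
   touched by a[i + 64] sixty-four iterations earlier, so a[i] gets
   PREFETCH_BEFORE 64, matching the overview. */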
655
656 /* Prune the prefetch candidate REF using the reuses with other references
657 in REFS. */
658
659 static void
660 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
661 {
662 struct mem_ref *prune_by;
663 bool before = true;
664
665 prune_ref_by_self_reuse (ref);
666
667 for (prune_by = refs; prune_by; prune_by = prune_by->next)
668 {
669 if (prune_by == ref)
670 {
671 before = false;
672 continue;
673 }
674
675 if (!WRITE_CAN_USE_READ_PREFETCH
676 && ref->write_p
677 && !prune_by->write_p)
678 continue;
679 if (!READ_CAN_USE_WRITE_PREFETCH
680 && !ref->write_p
681 && prune_by->write_p)
682 continue;
683
684 prune_ref_by_group_reuse (ref, prune_by, before);
685 }
686 }
687
688 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
689
690 static void
691 prune_group_by_reuse (struct mem_ref_group *group)
692 {
693 struct mem_ref *ref_pruned;
694
695 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
696 {
697 prune_ref_by_reuse (ref_pruned, group->refs);
698
699 if (dump_file && (dump_flags & TDF_DETAILS))
700 {
701 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
702
703 if (ref_pruned->prefetch_before == PREFETCH_ALL
704 && ref_pruned->prefetch_mod == 1)
705 fprintf (dump_file, " no restrictions");
706 else if (ref_pruned->prefetch_before == 0)
707 fprintf (dump_file, " do not prefetch");
708 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
709 fprintf (dump_file, " prefetch once");
710 else
711 {
712 if (ref_pruned->prefetch_before != PREFETCH_ALL)
713 {
714 fprintf (dump_file, " prefetch before ");
715 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
716 ref_pruned->prefetch_before);
717 }
718 if (ref_pruned->prefetch_mod != 1)
719 {
720 fprintf (dump_file, " prefetch mod ");
721 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
722 ref_pruned->prefetch_mod);
723 }
724 }
725 fprintf (dump_file, "\n");
726 }
727 }
728 }
729
730 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
731
732 static void
733 prune_by_reuse (struct mem_ref_group *groups)
734 {
735 for (; groups; groups = groups->next)
736 prune_group_by_reuse (groups);
737 }
738
739 /* Returns true if we should issue prefetch for REF. */
740
741 static bool
742 should_issue_prefetch_p (struct mem_ref *ref)
743 {
 744 /* For now, do not issue prefetches that are useful only in the first
 745 few iterations of the loop. */
746 if (ref->prefetch_before != PREFETCH_ALL)
747 return false;
748
749 return true;
750 }
751
752 /* Decide which of the prefetch candidates in GROUPS to prefetch.
753 AHEAD is the number of iterations to prefetch ahead (which corresponds
754 to the number of simultaneous instances of one prefetch running at a
 755 time). UNROLL_FACTOR is the factor by which the loop is going to be
756 unrolled. Returns true if there is anything to prefetch. */
757
758 static bool
759 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
760 unsigned ahead)
761 {
762 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
763 unsigned slots_per_prefetch;
764 struct mem_ref *ref;
765 bool any = false;
766
767 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
768 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
769
770 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
771 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
772 it will need a prefetch slot. */
773 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
774 if (dump_file && (dump_flags & TDF_DETAILS))
775 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
776 slots_per_prefetch);
777
778 /* For now we just take memory references one by one and issue
779 prefetches for as many as possible. The groups are sorted
780 starting with the largest step, since the references with
781 large step are more likely to cause many cache misses. */
782
783 for (; groups; groups = groups->next)
784 for (ref = groups->refs; ref; ref = ref->next)
785 {
786 if (!should_issue_prefetch_p (ref))
787 continue;
788
789 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
790 and we unroll the loop UNROLL_FACTOR times, we need to insert
791 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
792 iteration. */
793 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
794 / ref->prefetch_mod);
795 prefetch_slots = n_prefetches * slots_per_prefetch;
796
797 /* If more than half of the prefetches would be lost anyway, do not
798 issue the prefetch. */
799 if (2 * remaining_prefetch_slots < prefetch_slots)
800 continue;
801
802 ref->issue_prefetch_p = true;
803
804 if (remaining_prefetch_slots <= prefetch_slots)
805 return true;
806 remaining_prefetch_slots -= prefetch_slots;
807 any = true;
808 }
809
810 return any;
811 }
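
/* Numeric illustration (made-up values): with AHEAD == 8 and UNROLL_FACTOR
   == 4, each prefetch instruction occupies (8 + 4 / 2) / 4 == 2 slots; a
   reference with PREFETCH_MOD == 2 then needs ceil (4 / 2) == 2 prefetches
   per unrolled iteration, i.e. 4 of the SIMULTANEOUS_PREFETCHES slots. */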
812
813 /* Determine whether there is any reference suitable for prefetching
814 in GROUPS. */
815
816 static bool
817 anything_to_prefetch_p (struct mem_ref_group *groups)
818 {
819 struct mem_ref *ref;
820
821 for (; groups; groups = groups->next)
822 for (ref = groups->refs; ref; ref = ref->next)
823 if (should_issue_prefetch_p (ref))
824 return true;
825
826 return false;
827 }
828
 829 /* Issue prefetches for the reference REF into the loop as decided before.
 830 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
831 is the factor by which LOOP was unrolled. */
832
833 static void
834 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
835 {
836 HOST_WIDE_INT delta;
837 tree addr, addr_base, prefetch, write_p, local;
838 block_stmt_iterator bsi;
839 unsigned n_prefetches, ap;
840 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
841
842 if (dump_file && (dump_flags & TDF_DETAILS))
843 fprintf (dump_file, "Issued%s prefetch for %p.\n",
844 nontemporal ? " nontemporal" : "",
845 (void *) ref);
846
847 bsi = bsi_for_stmt (ref->stmt);
848
849 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
850 / ref->prefetch_mod);
851 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
852 addr_base = force_gimple_operand_bsi (&bsi, unshare_expr (addr_base), true, NULL);
853 write_p = ref->write_p ? integer_one_node : integer_zero_node;
854 local = build_int_cst (integer_type_node, nontemporal ? 0 : 3);
855
856 for (ap = 0; ap < n_prefetches; ap++)
857 {
858 /* Determine the address to prefetch. */
859 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
860 addr = fold_build2 (PLUS_EXPR, ptr_type_node,
861 addr_base, build_int_cst (ptr_type_node, delta));
862 addr = force_gimple_operand_bsi (&bsi, unshare_expr (addr), true, NULL);
863
864 /* Create the prefetch instruction. */
865 prefetch = build_call_expr (built_in_decls[BUILT_IN_PREFETCH],
866 3, addr, write_p, local);
867 bsi_insert_before (&bsi, prefetch, BSI_SAME_STMT);
868 }
869 }
870
 871 /* Issue prefetches for the references in GROUPS into the loop as decided
 872 before. AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
 873 is the factor by which LOOP was unrolled. */
874
875 static void
876 issue_prefetches (struct mem_ref_group *groups,
877 unsigned unroll_factor, unsigned ahead)
878 {
879 struct mem_ref *ref;
880
881 for (; groups; groups = groups->next)
882 for (ref = groups->refs; ref; ref = ref->next)
883 if (ref->issue_prefetch_p)
884 issue_prefetch_ref (ref, unroll_factor, ahead);
885 }
886
887 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
 888 this is the case, fills in DESC with the description of the number of
889 iterations. */
890
891 static bool
892 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
893 unsigned factor)
894 {
895 if (!can_unroll_loop_p (loop, factor, desc))
896 return false;
897
898 /* We only consider loops without control flow for unrolling. This is not
899 a hard restriction -- tree_unroll_loop works with arbitrary loops
900 as well; but the unrolling/prefetching is usually more profitable for
901 loops consisting of a single basic block, and we want to limit the
902 code growth. */
903 if (loop->num_nodes > 2)
904 return false;
905
906 return true;
907 }
908
 909 /* Determine the factor by which to unroll LOOP, from the information
 910 contained in the list of memory references REFS. The description of the
 911 number of iterations of LOOP is stored in DESC. NINSNS is the number of
912 insns of the LOOP. EST_NITER is the estimated number of iterations of
913 the loop, or -1 if no estimate is available. */
914
915 static unsigned
916 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
917 unsigned ninsns, struct tree_niter_desc *desc,
918 HOST_WIDE_INT est_niter)
919 {
920 unsigned upper_bound;
921 unsigned nfactor, factor, mod_constraint;
922 struct mem_ref_group *agp;
923 struct mem_ref *ref;
924
925 /* First check whether the loop is not too large to unroll. We ignore
926 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
 927 from unrolling them enough to have each iteration cover exactly one
 928 cache line. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
929 us from unrolling the loops too many times in cases where we only expect
930 gains from better scheduling and decreasing loop overhead, which is not
931 the case here. */
932 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
933
934 /* If we unrolled the loop more times than it iterates, the unrolled version
 935 of the loop would never be entered. */
936 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
937 upper_bound = est_niter;
938
939 if (upper_bound <= 1)
940 return 1;
941
 942 /* Choose the factor so that we may prefetch each cache line just once,
943 but bound the unrolling by UPPER_BOUND. */
944 factor = 1;
945 for (agp = refs; agp; agp = agp->next)
946 for (ref = agp->refs; ref; ref = ref->next)
947 if (should_issue_prefetch_p (ref))
948 {
949 mod_constraint = ref->prefetch_mod;
950 nfactor = least_common_multiple (mod_constraint, factor);
951 if (nfactor <= upper_bound)
952 factor = nfactor;
953 }
954
955 if (!should_unroll_loop_p (loop, desc, factor))
956 return 1;
957
958 return factor;
959 }
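
/* Example with assumed parameter values: if the loop has 10 insns and
   PARAM_MAX_UNROLLED_INSNS is 200, UPPER_BOUND is 20; if the prefetched
   references have PREFETCH_MOD 4 and 8, the chosen factor is their least
   common multiple, 8, which fits under the bound. */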
960
961 /* Returns the total volume of the memory references REFS, taking into account
962 reuses in the innermost loop and cache line size. TODO -- we should also
963 take into account reuses across the iterations of the loops in the loop
964 nest. */
965
966 static unsigned
967 volume_of_references (struct mem_ref_group *refs)
968 {
969 unsigned volume = 0;
970 struct mem_ref_group *gr;
971 struct mem_ref *ref;
972
973 for (gr = refs; gr; gr = gr->next)
974 for (ref = gr->refs; ref; ref = ref->next)
975 {
976 /* Almost always reuses another value? */
977 if (ref->prefetch_before != PREFETCH_ALL)
978 continue;
979
980 /* If several iterations access the same cache line, use the size of
981 the line divided by this number. Otherwise, a cache line is
982 accessed in each iteration. TODO -- in the latter case, we should
 983 take the size of the reference into account, rounding it up to a
 984 multiple of the cache line size. */
985 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
986 }
987 return volume;
988 }
989
990 /* Returns the volume of memory references accessed across VEC iterations of
991 loops, whose sizes are described in the LOOP_SIZES array. N is the number
992 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
993
994 static unsigned
995 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
996 {
997 unsigned i;
998
999 for (i = 0; i < n; i++)
1000 if (vec[i] != 0)
1001 break;
1002
1003 if (i == n)
1004 return 0;
1005
1006 gcc_assert (vec[i] > 0);
1007
1008 /* We ignore the parts of the distance vector in subloops, since usually
1009 the numbers of iterations are much smaller. */
1010 return loop_sizes[i] * vec[i];
1011 }
1012
 1013 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
 1014 at the position corresponding to the loop of the step. N is the depth
 1015 of the considered loop nest, and LOOP is its innermost loop. */
1016
1017 static void
1018 add_subscript_strides (tree access_fn, unsigned stride,
1019 HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1020 {
1021 struct loop *aloop;
1022 tree step;
1023 HOST_WIDE_INT astep;
1024 unsigned min_depth = loop_depth (loop) - n;
1025
1026 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1027 {
1028 aloop = get_chrec_loop (access_fn);
1029 step = CHREC_RIGHT (access_fn);
1030 access_fn = CHREC_LEFT (access_fn);
1031
1032 if ((unsigned) loop_depth (aloop) <= min_depth)
1033 continue;
1034
1035 if (host_integerp (step, 0))
1036 astep = tree_low_cst (step, 0);
1037 else
1038 astep = L1_CACHE_LINE_SIZE;
1039
1040 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1041
1042 }
1043 }
1044
1045 /* Returns the volume of memory references accessed between two consecutive
1046 self-reuses of the reference DR. We consider the subscripts of DR in N
1047 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1048 loops. LOOP is the innermost loop of the current loop nest. */
1049
1050 static unsigned
1051 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1052 struct loop *loop)
1053 {
1054 tree stride, access_fn;
1055 HOST_WIDE_INT *strides, astride;
1056 VEC (tree, heap) *access_fns;
1057 tree ref = DR_REF (dr);
1058 unsigned i, ret = ~0u;
1059
1060 /* In the following example:
1061
1062 for (i = 0; i < N; i++)
1063 for (j = 0; j < N; j++)
1064 use (a[j][i]);
 1065 the same cache line is accessed every N steps (except if the change from
1066 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1067 we cannot rely purely on the results of the data dependence analysis.
1068
1069 Instead, we compute the stride of the reference in each loop, and consider
 1070 the innermost loop in which the stride is less than the cache size. */
1071
1072 strides = XCNEWVEC (HOST_WIDE_INT, n);
1073 access_fns = DR_ACCESS_FNS (dr);
1074
1075 for (i = 0; VEC_iterate (tree, access_fns, i, access_fn); i++)
1076 {
1077 /* Keep track of the reference corresponding to the subscript, so that we
1078 know its stride. */
1079 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1080 ref = TREE_OPERAND (ref, 0);
1081
1082 if (TREE_CODE (ref) == ARRAY_REF)
1083 {
1084 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1085 if (host_integerp (stride, 1))
1086 astride = tree_low_cst (stride, 1);
1087 else
1088 astride = L1_CACHE_LINE_SIZE;
1089
1090 ref = TREE_OPERAND (ref, 0);
1091 }
1092 else
1093 astride = 1;
1094
1095 add_subscript_strides (access_fn, astride, strides, n, loop);
1096 }
1097
1098 for (i = n; i-- > 0; )
1099 {
1100 unsigned HOST_WIDE_INT s;
1101
1102 s = strides[i] < 0 ? -strides[i] : strides[i];
1103
1104 if (s < (unsigned) L1_CACHE_LINE_SIZE
1105 && (loop_sizes[i]
1106 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1107 {
1108 ret = loop_sizes[i];
1109 break;
1110 }
1111 }
1112
1113 free (strides);
1114 return ret;
1115 }
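
/* In the a[j][i] example above (assuming a row-major "int a[N][N]"), the
   stride of the reference is 4 * N in the inner j loop but only 4 in the
   outer i loop. For large N the inner-loop stride exceeds the cache line
   size, so the self-reuse distance becomes the data volume of one iteration
   of the i loop (a whole run of the j loop), which may exceed the L2 size
   and make the reference nontemporal. */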
1116
 1117 /* Determines the distance until the first reuse of each reference in REFS
1118 in the loop nest of LOOP. */
1119
1120 static void
1121 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs)
1122 {
1123 struct loop *nest, *aloop;
1124 VEC (data_reference_p, heap) *datarefs = NULL;
1125 VEC (ddr_p, heap) *dependences = NULL;
1126 struct mem_ref_group *gr;
1127 struct mem_ref *ref;
1128 VEC (loop_p, heap) *vloops = NULL;
1129 unsigned *loop_data_size;
1130 unsigned i, j, n;
1131 unsigned volume, dist, adist;
1132 HOST_WIDE_INT vol;
1133 data_reference_p dr;
1134 ddr_p dep;
1135
1136 if (loop->inner)
1137 return;
1138
 1139 /* Find the outermost loop of the loop nest of LOOP (we require that
1140 there are no sibling loops inside the nest). */
1141 nest = loop;
1142 while (1)
1143 {
1144 aloop = loop_outer (nest);
1145
1146 if (aloop == current_loops->tree_root
1147 || aloop->inner->next)
1148 break;
1149
1150 nest = aloop;
1151 }
1152
1153 /* For each loop, determine the amount of data accessed in each iteration.
1154 We use this to estimate whether the reference is evicted from the
1155 cache before its reuse. */
1156 find_loop_nest (nest, &vloops);
1157 n = VEC_length (loop_p, vloops);
1158 loop_data_size = XNEWVEC (unsigned, n);
1159 volume = volume_of_references (refs);
1160 i = n;
1161 while (i-- != 0)
1162 {
1163 loop_data_size[i] = volume;
1164 /* Bound the volume by the L2 cache size, since above this bound,
1165 all dependence distances are equivalent. */
1166 if (volume > L2_CACHE_SIZE_BYTES)
1167 continue;
1168
1169 aloop = VEC_index (loop_p, vloops, i);
1170 vol = estimated_loop_iterations_int (aloop, false);
1171 if (vol < 0)
1172 vol = expected_loop_iterations (aloop);
1173 volume *= vol;
1174 }
1175
1176 /* Prepare the references in the form suitable for data dependence
1177 analysis. We ignore unanalysable data references (the results
 1178 are used just as a heuristic to estimate the temporality of the
1179 references, hence we do not need to worry about correctness). */
1180 for (gr = refs; gr; gr = gr->next)
1181 for (ref = gr->refs; ref; ref = ref->next)
1182 {
1183 dr = create_data_ref (nest, ref->mem, ref->stmt, !ref->write_p);
1184
1185 if (dr)
1186 {
1187 ref->reuse_distance = volume;
1188 dr->aux = ref;
1189 VEC_safe_push (data_reference_p, heap, datarefs, dr);
1190 }
1191 }
1192
1193 for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
1194 {
1195 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1196 ref = dr->aux;
1197 if (ref->reuse_distance > dist)
1198 ref->reuse_distance = dist;
1199 }
1200
1201 compute_all_dependences (datarefs, &dependences, vloops, true);
1202
1203 for (i = 0; VEC_iterate (ddr_p, dependences, i, dep); i++)
1204 {
1205 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1206 continue;
1207
1208 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1209 || DDR_NUM_DIST_VECTS (dep) == 0)
1210 {
1211 /* If the dependence cannot be analysed, assume that there might be
1212 a reuse. */
1213 dist = 0;
1214 }
1215 else
1216 {
1217 /* The distance vectors are normalised to be always lexicographically
1218 positive, hence we cannot tell just from them whether DDR_A comes
 1219 before DDR_B or vice versa. However, this is not important
1220 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1221 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1222 in cache (and marking it as nontemporal would not affect
1223 anything). */
1224
1225 dist = volume;
1226 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1227 {
1228 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1229 loop_data_size, n);
1230
1231 /* Ignore accesses closer than
1232 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
 1233 so that we use nontemporal prefetches e.g. if a single memory
1234 location is accessed several times in a single iteration of
1235 the loop. */
1236 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1237 continue;
1238
1239 if (adist < dist)
1240 dist = adist;
1241 }
1242 }
1243
1244 ref = DDR_A (dep)->aux;
1245 if (ref->reuse_distance > dist)
1246 ref->reuse_distance = dist;
1247 ref = DDR_B (dep)->aux;
1248 if (ref->reuse_distance > dist)
1249 ref->reuse_distance = dist;
1250 }
1251
1252 free_dependence_relations (dependences);
1253 free_data_refs (datarefs);
1254 free (loop_data_size);
1255
1256 if (dump_file && (dump_flags & TDF_DETAILS))
1257 {
1258 fprintf (dump_file, "Reuse distances:\n");
1259 for (gr = refs; gr; gr = gr->next)
1260 for (ref = gr->refs; ref; ref = ref->next)
1261 fprintf (dump_file, " ref %p distance %u\n",
1262 (void *) ref, ref->reuse_distance);
1263 }
1264 }
1265
1266 /* Issue prefetch instructions for array references in LOOP. Returns
1267 true if the LOOP was unrolled. */
1268
1269 static bool
1270 loop_prefetch_arrays (struct loop *loop)
1271 {
1272 struct mem_ref_group *refs;
1273 unsigned ahead, ninsns, time, unroll_factor;
1274 HOST_WIDE_INT est_niter;
1275 struct tree_niter_desc desc;
1276 bool unrolled = false;
1277
1278 if (!maybe_hot_bb_p (loop->header))
1279 {
1280 if (dump_file && (dump_flags & TDF_DETAILS))
1281 fprintf (dump_file, " ignored (cold area)\n");
1282 return false;
1283 }
1284
1285 /* Step 1: gather the memory references. */
1286 refs = gather_memory_references (loop);
1287
1288 /* Step 2: estimate the reuse effects. */
1289 prune_by_reuse (refs);
1290
1291 if (!anything_to_prefetch_p (refs))
1292 goto fail;
1293
1294 determine_loop_nest_reuse (loop, refs);
1295
1296 /* Step 3: determine the ahead and unroll factor. */
1297
1298 /* FIXME: the time should be weighted by the probabilities of the blocks in
1299 the loop body. */
1300 time = tree_num_loop_insns (loop, &eni_time_weights);
1301 ahead = (PREFETCH_LATENCY + time - 1) / time;
1302 est_niter = estimated_loop_iterations_int (loop, false);
1303
1304 /* The prefetches will run for AHEAD iterations of the original loop. Unless
1305 the loop rolls at least AHEAD times, prefetching the references does not
1306 make sense. */
1307 if (est_niter >= 0 && est_niter <= (HOST_WIDE_INT) ahead)
1308 {
1309 if (dump_file && (dump_flags & TDF_DETAILS))
1310 fprintf (dump_file,
1311 "Not prefetching -- loop estimated to roll only %d times\n",
1312 (int) est_niter);
1313 goto fail;
1314 }
1315
1316 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1317 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1318 est_niter);
1319 if (dump_file && (dump_flags & TDF_DETAILS))
1320 fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);
1321
1322 /* Step 4: what to prefetch? */
1323 if (!schedule_prefetches (refs, unroll_factor, ahead))
1324 goto fail;
1325
1326 /* Step 5: unroll the loop. TODO -- peeling of first and last few
1327 iterations so that we do not issue superfluous prefetches. */
1328 if (unroll_factor != 1)
1329 {
1330 tree_unroll_loop (loop, unroll_factor,
1331 single_dom_exit (loop), &desc);
1332 unrolled = true;
1333 }
1334
1335 /* Step 6: issue the prefetches. */
1336 issue_prefetches (refs, unroll_factor, ahead);
1337
1338 fail:
1339 release_mem_refs (refs);
1340 return unrolled;
1341 }
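
/* For instance (made-up numbers): with PREFETCH_LATENCY == 200 and a loop
   body costing 30 time units, AHEAD == (200 + 30 - 1) / 30 == 7, so a loop
   estimated to roll at most 7 times is skipped as not worth prefetching. */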
1342
1343 /* Issue prefetch instructions for array references in loops. */
1344
1345 unsigned int
1346 tree_ssa_prefetch_arrays (void)
1347 {
1348 loop_iterator li;
1349 struct loop *loop;
1350 bool unrolled = false;
1351 int todo_flags = 0;
1352
1353 if (!HAVE_prefetch
 1354 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
 1355 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
 1356 of the processor costs and the i486 does not have prefetch, but
1357 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1358 || PREFETCH_BLOCK == 0)
1359 return 0;
1360
1361 if (dump_file && (dump_flags & TDF_DETAILS))
1362 {
1363 fprintf (dump_file, "Prefetching parameters:\n");
1364 fprintf (dump_file, " simultaneous prefetches: %d\n",
1365 SIMULTANEOUS_PREFETCHES);
1366 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1367 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1368 fprintf (dump_file, " L1 cache size: %d lines, %d bytes\n",
1369 L1_CACHE_SIZE, L1_CACHE_SIZE_BYTES);
1370 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1371 fprintf (dump_file, " L2 cache size: %d bytes\n", L2_CACHE_SIZE_BYTES);
1372 fprintf (dump_file, "\n");
1373 }
1374
1375 initialize_original_copy_tables ();
1376
1377 if (!built_in_decls[BUILT_IN_PREFETCH])
1378 {
1379 tree type = build_function_type (void_type_node,
1380 tree_cons (NULL_TREE,
1381 const_ptr_type_node,
1382 NULL_TREE));
1383 tree decl = add_builtin_function ("__builtin_prefetch", type,
1384 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1385 NULL, NULL_TREE);
1386 DECL_IS_NOVOPS (decl) = true;
1387 built_in_decls[BUILT_IN_PREFETCH] = decl;
1388 }
1389
 1390 /* We assume that the cache line size is a power of two, so verify this
1391 here. */
1392 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1393
1394 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1395 {
1396 if (dump_file && (dump_flags & TDF_DETAILS))
1397 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1398
1399 unrolled |= loop_prefetch_arrays (loop);
1400
1401 if (dump_file && (dump_flags & TDF_DETAILS))
1402 fprintf (dump_file, "\n\n");
1403 }
1404
1405 if (unrolled)
1406 {
1407 scev_reset ();
1408 todo_flags |= TODO_cleanup_cfg;
1409 }
1410
1411 free_original_copy_tables ();
1412 return todo_flags;
1413 }
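
/* Usage note: this pass is normally enabled with -fprefetch-loop-arrays;
   on targets that provide them, the cache and prefetch parameters used
   above can typically be adjusted with --param l1-cache-line-size=N,
   --param l1-cache-size=N, --param prefetch-latency=N and
   --param simultaneous-prefetches=N. */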