gcc/tree-ssa-loop-prefetch.c
1 /* Array prefetching.
2 Copyright (C) 2005 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
30 #include "output.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "timevar.h"
35 #include "cfgloop.h"
36 #include "varray.h"
37 #include "expr.h"
38 #include "tree-pass.h"
39 #include "ggc.h"
40 #include "insn-config.h"
41 #include "recog.h"
42 #include "hashtab.h"
43 #include "tree-chrec.h"
44 #include "tree-scalar-evolution.h"
45 #include "toplev.h"
46 #include "params.h"
47 #include "langhooks.h"
48
49 /* This pass inserts prefetch instructions to optimize cache usage during
50 accesses to arrays in loops. It processes loops sequentially and:
51
52 1) Gathers all memory references in a single loop.
53 2) For each of the references it decides when it is profitable to prefetch
54 it. To do this, we evaluate the reuse among the accesses and determine
55 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
56 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
57 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
58 iterations of the loop that are zero modulo PREFETCH_MOD). For example
59 (assuming cache line size is 64 bytes, char has size 1 byte and there
60 is no hardware sequential prefetch):
61
62 char *a;
63 for (i = 0; i < max; i++)
64 {
65 a[255] = ...; (0)
66 a[i] = ...; (1)
67 a[i + 64] = ...; (2)
68 a[16*i] = ...; (3)
69 a[187*i] = ...; (4)
70 a[187*i + 50] = ...; (5)
71 }
72
73 (0) obviously has PREFETCH_BEFORE 1
74 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
75 location 64 iterations before it, and PREFETCH_MOD 64 (since
76 it hits the same cache line otherwise).
77 (2) has PREFETCH_MOD 64
78 (3) has PREFETCH_MOD 4
79 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
80 the cache line accessed by (5) is the same as the one accessed by
81 (4) with probability only 7/32.
82 (5) has PREFETCH_MOD 1 as well.
83
84 3) We determine how much ahead we need to prefetch. The number of
85 iterations needed is time to fetch / time spent in one iteration of
86 the loop. The problem is that we do not know either of these values,
87 so we just make a heuristic guess based on a magic (possibly)
88 target-specific constant and size of the loop.
89
90 4) Determine which of the references we prefetch. We take into account
91 that there is a maximum number of simultaneous prefetches (provided
92 by machine description). We issue as many prefetches as possible
93 while still within this bound (starting with those with lowest
94 prefetch_mod, since they are responsible for most of the cache
95 misses).
96
97 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
98 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
99 prefetching nonaccessed memory.
100 TODO -- actually implement peeling.
101
102 6) We actually emit the prefetch instructions. ??? Perhaps emit the
103 prefetch instructions with guards in cases where 5) was not sufficient
104 to satisfy the constraints?
105
106 Some other TODO:
107 -- write and use more general reuse analysis (that could be also used
108 in other cache-aimed loop optimizations)
109 -- make it behave sanely together with the prefetches given by the user
110 (now we just ignore them; at the very least we should avoid
111 optimizing loops in which the user put his own prefetches)
112 -- we assume cache line size alignment of arrays; this could be
113 improved. */
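
/* As a rough source-level illustration (a hypothetical sketch, not literal
   output of the pass -- the transformation happens on GIMPLE and all the
   constants depend on the target parameters below), assuming 64-byte cache
   lines, 8-byte array elements, an unroll factor of 8 and a prefetch
   distance of 32 iterations, a loop such as

       for (i = 0; i < n; i++)
         a[i] = f (i);

   would end up roughly as

       for (i = 0; i < n; i += 8)
         {
           __builtin_prefetch (&a[i + 32], 1);
           a[i] = f (i);
           a[i + 1] = f (i + 1);
           ...
           a[i + 7] = f (i + 7);
         }

   so that each unrolled iteration touches one cache line and issues one
   prefetch for the data used 32 original iterations later.  */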
114
115 /* Magic constants follow. These should be replaced by machine specific
116 numbers. */
117
118 /* True if write can be prefetched by a read prefetch. */
119
120 #ifndef WRITE_CAN_USE_READ_PREFETCH
121 #define WRITE_CAN_USE_READ_PREFETCH 1
122 #endif
123
124 /* True if read can be prefetched by a write prefetch. */
125
126 #ifndef READ_CAN_USE_WRITE_PREFETCH
127 #define READ_CAN_USE_WRITE_PREFETCH 0
128 #endif
129
130 /* The size of the block loaded by a single prefetch. Usually, this is
131 the same as cache line size (at the moment, we only consider one level
132 of cache hierarchy). */
133
134 #ifndef PREFETCH_BLOCK
135 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
136 #endif
137
138 /* Do we have forward hardware sequential prefetching? */
139
140 #ifndef HAVE_FORWARD_PREFETCH
141 #define HAVE_FORWARD_PREFETCH 0
142 #endif
143
144 /* Do we have backward hardware sequential prefetching? */
145
146 #ifndef HAVE_BACKWARD_PREFETCH
147 #define HAVE_BACKWARD_PREFETCH 0
148 #endif
149
150 /* In some cases we are only able to determine that there is a certain
151 probability that the two accesses hit the same cache line. In this
152 case, we issue the prefetches for both of them if this probability
153 is less than (1000 - ACCEPTABLE_MISS_RATE) per mille. */
154
155 #ifndef ACCEPTABLE_MISS_RATE
156 #define ACCEPTABLE_MISS_RATE 50
157 #endif
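
/* For instance, with the default PREFETCH_BLOCK of 64 and ACCEPTABLE_MISS_RATE
   of 50, prune_ref_by_group_reuse below only treats two accesses with a large
   step as hitting the same cache line when their reduced distance is at most
   64 * 50 / 1000 = 3 bytes (assuming the step is odd, so that the block size
   is not scaled down); otherwise both of them remain prefetch candidates.  */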
158
159 #ifndef HAVE_prefetch
160 #define HAVE_prefetch 0
161 #endif
162
163 /* The group of references among which reuse may occur. */
164
165 struct mem_ref_group
166 {
167 tree base; /* Base of the reference. */
168 HOST_WIDE_INT step; /* Step of the reference. */
169 struct mem_ref *refs; /* References in the group. */
170 struct mem_ref_group *next; /* Next group of references. */
171 };
172
173 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
174
175 #define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
176
177 /* The memory reference. */
178
179 struct mem_ref
180 {
181 tree stmt; /* Statement in which the reference appears. */
182 tree mem; /* The reference. */
183 HOST_WIDE_INT delta; /* Constant offset of the reference. */
184 bool write_p; /* Is it a write? */
185 struct mem_ref_group *group; /* The group of references it belongs to. */
186 unsigned HOST_WIDE_INT prefetch_mod;
187 /* Prefetch only each PREFETCH_MOD-th
188 iteration. */
189 unsigned HOST_WIDE_INT prefetch_before;
190 /* Prefetch only first PREFETCH_BEFORE
191 iterations. */
192 bool issue_prefetch_p; /* Should we really issue the prefetch? */
193 struct mem_ref *next; /* The next reference in the group. */
194 };
195
196 /* Dumps information about reference REF to FILE. */
197
198 static void
199 dump_mem_ref (FILE *file, struct mem_ref *ref)
200 {
201 fprintf (file, "Reference %p:\n", (void *) ref);
202
203 fprintf (file, " group %p (base ", (void *) ref->group);
204 print_generic_expr (file, ref->group->base, TDF_SLIM);
205 fprintf (file, ", step ");
206 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->group->step);
207 fprintf (file, ")\n");
208
209 fprintf (file, " delta ");
210 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ref->delta);
211 fprintf (file, "\n");
212
213 fprintf (file, " %s\n", ref->write_p ? "write" : "read");
214
215 fprintf (file, "\n");
216 }
217
218 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
219 exist. */
220
221 static struct mem_ref_group *
222 find_or_create_group (struct mem_ref_group **groups, tree base,
223 HOST_WIDE_INT step)
224 {
225 struct mem_ref_group *group;
226
227 for (; *groups; groups = &(*groups)->next)
228 {
229 if ((*groups)->step == step
230 && operand_equal_p ((*groups)->base, base, 0))
231 return *groups;
232
233 /* Keep the list of groups sorted by decreasing step. */
234 if ((*groups)->step < step)
235 break;
236 }
237
238 group = xcalloc (1, sizeof (struct mem_ref_group));
239 group->base = base;
240 group->step = step;
241 group->refs = NULL;
242 group->next = *groups;
243 *groups = group;
244
245 return group;
246 }
247
248 /* Records a memory reference MEM in GROUP with offset DELTA and write status
249 WRITE_P. The reference occurs in statement STMT. */
250
251 static void
252 record_ref (struct mem_ref_group *group, tree stmt, tree mem,
253 HOST_WIDE_INT delta, bool write_p)
254 {
255 struct mem_ref **aref;
256
257 /* Do not record the same address twice. */
258 for (aref = &group->refs; *aref; aref = &(*aref)->next)
259 {
260 /* It need not be possible for a write reference to reuse a read
261 prefetch, or vice versa. */
262 if (!WRITE_CAN_USE_READ_PREFETCH
263 && write_p
264 && !(*aref)->write_p)
265 continue;
266 if (!READ_CAN_USE_WRITE_PREFETCH
267 && !write_p
268 && (*aref)->write_p)
269 continue;
270
271 if ((*aref)->delta == delta)
272 return;
273 }
274
275 (*aref) = xcalloc (1, sizeof (struct mem_ref));
276 (*aref)->stmt = stmt;
277 (*aref)->mem = mem;
278 (*aref)->delta = delta;
279 (*aref)->write_p = write_p;
280 (*aref)->prefetch_before = PREFETCH_ALL;
281 (*aref)->prefetch_mod = 1;
282 (*aref)->issue_prefetch_p = false;
283 (*aref)->group = group;
284 (*aref)->next = NULL;
285
286 if (dump_file && (dump_flags & TDF_DETAILS))
287 dump_mem_ref (dump_file, *aref);
288 }
289
290 /* Release memory references in GROUPS. */
291
292 static void
293 release_mem_refs (struct mem_ref_group *groups)
294 {
295 struct mem_ref_group *next_g;
296 struct mem_ref *ref, *next_r;
297
298 for (; groups; groups = next_g)
299 {
300 next_g = groups->next;
301 for (ref = groups->refs; ref; ref = next_r)
302 {
303 next_r = ref->next;
304 free (ref);
305 }
306 free (groups);
307 }
308 }
309
310 /* A structure used to pass arguments to idx_analyze_ref. */
311
312 struct ar_data
313 {
314 struct loop *loop; /* Loop of the reference. */
315 tree stmt; /* Statement of the reference. */
316 HOST_WIDE_INT *step; /* Step of the memory reference. */
317 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
318 };
319
320 /* Analyzes a single INDEX of a memory reference to obtain information
321 described at analyze_ref. Callback for for_each_index. */
322
323 static bool
324 idx_analyze_ref (tree base, tree *index, void *data)
325 {
326 struct ar_data *ar_data = data;
327 tree ibase, step, stepsize;
328 HOST_WIDE_INT istep, idelta = 0, imult = 1;
329 affine_iv iv;
330
331 if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF
332 || TREE_CODE (base) == ALIGN_INDIRECT_REF)
333 return false;
334
335 if (!simple_iv (ar_data->loop, ar_data->stmt, *index, &iv, false))
336 return false;
337 ibase = iv.base;
338 step = iv.step;
339
340 if (!cst_and_fits_in_hwi (step))
341 return false;
342 istep = int_cst_value (step);
343
344 if (TREE_CODE (ibase) == PLUS_EXPR
345 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
346 {
347 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
348 ibase = TREE_OPERAND (ibase, 0);
349 }
350 if (cst_and_fits_in_hwi (ibase))
351 {
352 idelta += int_cst_value (ibase);
353 ibase = build_int_cst (TREE_TYPE (ibase), 0);
354 }
355
356 if (TREE_CODE (base) == ARRAY_REF)
357 {
358 stepsize = array_ref_element_size (base);
359 if (!cst_and_fits_in_hwi (stepsize))
360 return false;
361 imult = int_cst_value (stepsize);
362
363 istep *= imult;
364 idelta *= imult;
365 }
366
367 *ar_data->step += istep;
368 *ar_data->delta += idelta;
369 *index = ibase;
370
371 return true;
372 }
373
374 /* Tries to express REF_P in the form &BASE + STEP * iter + DELTA, where DELTA
375 and STEP are integer constants and iter is the number of iterations of LOOP. The
376 reference occurs in statement STMT. Strips nonaddressable component
377 references from REF_P. */
378
379 static bool
380 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
381 HOST_WIDE_INT *step, HOST_WIDE_INT *delta,
382 tree stmt)
383 {
384 struct ar_data ar_data;
385 tree off;
386 HOST_WIDE_INT bit_offset;
387 tree ref = *ref_p;
388
389 *step = 0;
390 *delta = 0;
391
392 /* First strip off the component references. Ignore bitfields. */
393 if (TREE_CODE (ref) == COMPONENT_REF
394 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1)))
395 ref = TREE_OPERAND (ref, 0);
396
397 *ref_p = ref;
398
399 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
400 {
401 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
402 bit_offset = TREE_INT_CST_LOW (off);
403 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
404
405 *delta += bit_offset / BITS_PER_UNIT;
406 }
407
408 *base = unshare_expr (ref);
409 ar_data.loop = loop;
410 ar_data.stmt = stmt;
411 ar_data.step = step;
412 ar_data.delta = delta;
413 return for_each_index (base, idx_analyze_ref, &ar_data);
414 }
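
/* A hypothetical illustration of the decomposition above: for a reference
   a[i + 3] to an array of 4-byte integers, where i is the loop's induction
   variable starting at 0 with step 1, idx_analyze_ref scales the evolution
   of the index by the element size, so analyze_ref returns roughly

       BASE = a[0], STEP = 4, DELTA = 12,

   i.e. the address is &BASE + 4 * iter + 12.  */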
415
416 /* Record a memory reference REF to the list REFS. The reference occurs in
417 LOOP in statement STMT and it is write if WRITE_P. */
418
419 static void
420 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
421 tree ref, bool write_p, tree stmt)
422 {
423 tree base;
424 HOST_WIDE_INT step, delta;
425 struct mem_ref_group *agrp;
426
427 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
428 return;
429
430 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
431 are integer constants. */
432 agrp = find_or_create_group (refs, base, step);
433 record_ref (agrp, stmt, ref, delta, write_p);
434 }
435
436 /* Record the suitable memory references in LOOP. */
437
438 static struct mem_ref_group *
439 gather_memory_references (struct loop *loop)
440 {
441 basic_block *body = get_loop_body_in_dom_order (loop);
442 basic_block bb;
443 unsigned i;
444 block_stmt_iterator bsi;
445 tree stmt, lhs, rhs;
446 struct mem_ref_group *refs = NULL;
447
448 /* Scan the loop body in order, so that earlier references precede the
449 later ones. */
450 for (i = 0; i < loop->num_nodes; i++)
451 {
452 bb = body[i];
453 if (bb->loop_father != loop)
454 continue;
455
456 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
457 {
458 stmt = bsi_stmt (bsi);
459 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
460 continue;
461
462 lhs = GIMPLE_STMT_OPERAND (stmt, 0);
463 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
464
465 if (REFERENCE_CLASS_P (rhs))
466 gather_memory_references_ref (loop, &refs, rhs, false, stmt);
467 if (REFERENCE_CLASS_P (lhs))
468 gather_memory_references_ref (loop, &refs, lhs, true, stmt);
469 }
470 }
471 free (body);
472
473 return refs;
474 }
475
476 /* Prune the prefetch candidate REF using the self-reuse. */
477
478 static void
479 prune_ref_by_self_reuse (struct mem_ref *ref)
480 {
481 HOST_WIDE_INT step = ref->group->step;
482 bool backward = step < 0;
483
484 if (step == 0)
485 {
486 /* Prefetch references to an invariant address just once. */
487 ref->prefetch_before = 1;
488 return;
489 }
490
491 if (backward)
492 step = -step;
493
494 if (step > PREFETCH_BLOCK)
495 return;
496
497 if ((backward && HAVE_BACKWARD_PREFETCH)
498 || (!backward && HAVE_FORWARD_PREFETCH))
499 {
500 ref->prefetch_before = 1;
501 return;
502 }
503
504 ref->prefetch_mod = PREFETCH_BLOCK / step;
505 }
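
/* For example, with a 64-byte PREFETCH_BLOCK and no hardware sequential
   prefetch, a reference with step 4 gets prefetch_mod = 64 / 4 = 16 (one
   prefetch covers 16 iterations), a loop-invariant reference (step 0) gets
   prefetch_before = 1, and a reference with step larger than 64 is left
   unrestricted, since each iteration touches a new cache line.  */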
506
507 /* Divides X by BY, rounding down. */
508
509 static HOST_WIDE_INT
510 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
511 {
512 gcc_assert (by > 0);
513
514 if (x >= 0)
515 return x / by;
516 else
517 return (x + by - 1) / by;
518 }
519
520 /* Prune the prefetch candidate REF using the reuse with BY.
521 If BY_IS_BEFORE is true, BY is before REF in the loop. */
522
523 static void
524 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
525 bool by_is_before)
526 {
527 HOST_WIDE_INT step = ref->group->step;
528 bool backward = step < 0;
529 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
530 HOST_WIDE_INT delta = delta_b - delta_r;
531 HOST_WIDE_INT hit_from;
532 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
533
534 if (delta == 0)
535 {
536 /* If the references have the same address, only prefetch the
537 former. */
538 if (by_is_before)
539 ref->prefetch_before = 0;
540
541 return;
542 }
543
544 if (!step)
545 {
546 /* If the reference addresses are invariant and fall into the
547 same cache line, prefetch just the first one. */
548 if (!by_is_before)
549 return;
550
551 if (ddown (ref->delta, PREFETCH_BLOCK)
552 != ddown (by->delta, PREFETCH_BLOCK))
553 return;
554
555 ref->prefetch_before = 0;
556 return;
557 }
558
559 /* Only prune the reference that is behind in the array. */
560 if (backward)
561 {
562 if (delta > 0)
563 return;
564
565 /* Transform the data so that we may assume that the accesses
566 are forward. */
567 delta = - delta;
568 step = -step;
569 delta_r = PREFETCH_BLOCK - 1 - delta_r;
570 delta_b = PREFETCH_BLOCK - 1 - delta_b;
571 }
572 else
573 {
574 if (delta < 0)
575 return;
576 }
577
578 /* Check whether the two references are likely to hit the same cache
579 line, and how far apart the iterations in which that happens are from
580 each other. */
581
582 if (step <= PREFETCH_BLOCK)
583 {
584 /* The accesses are sure to meet. Let us check when. */
585 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
586 prefetch_before = (hit_from - delta_r + step - 1) / step;
587
588 if (prefetch_before < ref->prefetch_before)
589 ref->prefetch_before = prefetch_before;
590
591 return;
592 }
593
594 /* A more complicated case. First let us ensure that the cache line size
595 and step are coprime (here we assume that PREFETCH_BLOCK is a power
596 of two). */
597 prefetch_block = PREFETCH_BLOCK;
598 while ((step & 1) == 0
599 && prefetch_block > 1)
600 {
601 step >>= 1;
602 prefetch_block >>= 1;
603 delta >>= 1;
604 }
605
606 /* Now step > prefetch_block, and step and prefetch_block are coprime.
607 Determine the probability that the accesses hit the same cache line. */
608
609 prefetch_before = delta / step;
610 delta %= step;
611 if ((unsigned HOST_WIDE_INT) delta
612 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
613 {
614 if (prefetch_before < ref->prefetch_before)
615 ref->prefetch_before = prefetch_before;
616
617 return;
618 }
619
620 /* Try also the following iteration. */
621 prefetch_before++;
622 delta = step - delta;
623 if ((unsigned HOST_WIDE_INT) delta
624 <= (prefetch_block * ACCEPTABLE_MISS_RATE / 1000))
625 {
626 if (prefetch_before < ref->prefetch_before)
627 ref->prefetch_before = prefetch_before;
628
629 return;
630 }
631
632 /* REF probably does not reuse BY. */
633 return;
634 }
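
/* To illustrate the large-step case on accesses (4) and (5) from the comment
   at the top of the file (step 187, delta 50, PREFETCH_BLOCK 64): 187 is odd,
   so the reduction loop leaves prefetch_block at 64; prefetch_before becomes
   50 / 187 = 0 with delta 50, which is above 64 * 50 / 1000 = 3, and the next
   iteration gives delta 187 - 50 = 137, also above the bound.  Hence neither
   reference is pruned and both keep their own prefetch.  */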
635
636 /* Prune the prefetch candidate REF using the reuses with other references
637 in REFS. */
638
639 static void
640 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
641 {
642 struct mem_ref *prune_by;
643 bool before = true;
644
645 prune_ref_by_self_reuse (ref);
646
647 for (prune_by = refs; prune_by; prune_by = prune_by->next)
648 {
649 if (prune_by == ref)
650 {
651 before = false;
652 continue;
653 }
654
655 if (!WRITE_CAN_USE_READ_PREFETCH
656 && ref->write_p
657 && !prune_by->write_p)
658 continue;
659 if (!READ_CAN_USE_WRITE_PREFETCH
660 && !ref->write_p
661 && prune_by->write_p)
662 continue;
663
664 prune_ref_by_group_reuse (ref, prune_by, before);
665 }
666 }
667
668 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
669
670 static void
671 prune_group_by_reuse (struct mem_ref_group *group)
672 {
673 struct mem_ref *ref_pruned;
674
675 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
676 {
677 prune_ref_by_reuse (ref_pruned, group->refs);
678
679 if (dump_file && (dump_flags & TDF_DETAILS))
680 {
681 fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
682
683 if (ref_pruned->prefetch_before == PREFETCH_ALL
684 && ref_pruned->prefetch_mod == 1)
685 fprintf (dump_file, " no restrictions");
686 else if (ref_pruned->prefetch_before == 0)
687 fprintf (dump_file, " do not prefetch");
688 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
689 fprintf (dump_file, " prefetch once");
690 else
691 {
692 if (ref_pruned->prefetch_before != PREFETCH_ALL)
693 {
694 fprintf (dump_file, " prefetch before ");
695 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
696 ref_pruned->prefetch_before);
697 }
698 if (ref_pruned->prefetch_mod != 1)
699 {
700 fprintf (dump_file, " prefetch mod ");
701 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
702 ref_pruned->prefetch_mod);
703 }
704 }
705 fprintf (dump_file, "\n");
706 }
707 }
708 }
709
710 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
711
712 static void
713 prune_by_reuse (struct mem_ref_group *groups)
714 {
715 for (; groups; groups = groups->next)
716 prune_group_by_reuse (groups);
717 }
718
719 /* Returns true if we should issue prefetch for REF. */
720
721 static bool
722 should_issue_prefetch_p (struct mem_ref *ref)
723 {
724 /* For now, do not issue prefetches that would be useful only in the
725 first few iterations of the loop. */
726 if (ref->prefetch_before != PREFETCH_ALL)
727 return false;
728
729 return true;
730 }
731
732 /* Decide which of the prefetch candidates in GROUPS to prefetch.
733 AHEAD is the number of iterations to prefetch ahead (which corresponds
734 to the number of simultaneous instances of one prefetch running at a
735 time). UNROLL_FACTOR is the factor by which the loop is going to be
736 unrolled. Returns true if there is anything to prefetch. */
737
738 static bool
739 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
740 unsigned ahead)
741 {
742 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
743 unsigned slots_per_prefetch;
744 struct mem_ref *ref;
745 bool any = false;
746
747 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
748 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
749
750 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
751 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
752 it will need a prefetch slot. */
753 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
754 if (dump_file && (dump_flags & TDF_DETAILS))
755 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
756 slots_per_prefetch);
757
758 /* For now we just take memory references one by one and issue
759 prefetches for as many as possible. The groups are sorted
760 starting with the largest step, since the references with
761 large step are more likely to cause many cache misses. */
762
763 for (; groups; groups = groups->next)
764 for (ref = groups->refs; ref; ref = ref->next)
765 {
766 if (!should_issue_prefetch_p (ref))
767 continue;
768
769 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
770 and we unroll the loop UNROLL_FACTOR times, we need to insert
771 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
772 iteration. */
773 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
774 / ref->prefetch_mod);
775 prefetch_slots = n_prefetches * slots_per_prefetch;
776
777 /* If more than half of the prefetches would be lost anyway, do not
778 issue the prefetch. */
779 if (2 * remaining_prefetch_slots < prefetch_slots)
780 continue;
781
782 ref->issue_prefetch_p = true;
783
784 if (remaining_prefetch_slots <= prefetch_slots)
785 return true;
786 remaining_prefetch_slots -= prefetch_slots;
787 any = true;
788 }
789
790 return any;
791 }
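
/* A small worked example (the numbers are hypothetical; SIMULTANEOUS_PREFETCHES
   comes from the target): with ahead = 8 and unroll_factor = 4, each prefetch
   instruction takes (8 + 2) / 4 = 2 slots.  A reference with prefetch_mod = 2
   then needs ceil (4 / 2) = 2 prefetch instructions per unrolled iteration,
   i.e. 4 slots, and it is scheduled only while at least half of that -- 2
   slots -- is still available.  */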
792
793 /* Determine whether there is any reference suitable for prefetching
794 in GROUPS. */
795
796 static bool
797 anything_to_prefetch_p (struct mem_ref_group *groups)
798 {
799 struct mem_ref *ref;
800
801 for (; groups; groups = groups->next)
802 for (ref = groups->refs; ref; ref = ref->next)
803 if (should_issue_prefetch_p (ref))
804 return true;
805
806 return false;
807 }
808
809 /* Issue prefetches for the reference REF into the loop as decided before.
810 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
811 is the factor by which LOOP was unrolled. */
812
813 static void
814 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
815 {
816 HOST_WIDE_INT delta;
817 tree addr, addr_base, prefetch, params, write_p;
818 block_stmt_iterator bsi;
819 unsigned n_prefetches, ap;
820
821 if (dump_file && (dump_flags & TDF_DETAILS))
822 fprintf (dump_file, "Issued prefetch for %p.\n", (void *) ref);
823
824 bsi = bsi_for_stmt (ref->stmt);
825
826 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
827 / ref->prefetch_mod);
828 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
829 addr_base = force_gimple_operand_bsi (&bsi, unshare_expr (addr_base), true, NULL);
830 write_p = ref->write_p ? integer_one_node : integer_zero_node;
831
832 for (ap = 0; ap < n_prefetches; ap++)
833 {
834 /* Determine the address to prefetch. */
835 delta = (ahead + ap * ref->prefetch_mod) * ref->group->step;
836 addr = fold_build2 (PLUS_EXPR, ptr_type_node,
837 addr_base, build_int_cst (ptr_type_node, delta));
838 addr = force_gimple_operand_bsi (&bsi, unshare_expr (addr), true, NULL);
839
840 /* Create the prefetch instruction. */
841 params = tree_cons (NULL_TREE, addr,
842 tree_cons (NULL_TREE, write_p, NULL_TREE));
843
844 prefetch = build_function_call_expr (built_in_decls[BUILT_IN_PREFETCH],
845 params);
846 bsi_insert_before (&bsi, prefetch, BSI_SAME_STMT);
847 }
848 }
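
/* For instance (hypothetical values), for a reference MEM whose group has
   step 8 and prefetch_mod 8, with unroll_factor 8 and ahead 32, a single
   prefetch is built per unrolled iteration, equivalent at the source level to

       __builtin_prefetch ((char *) &MEM + (32 + 0 * 8) * 8, write_p);

   i.e. the address 256 bytes beyond the current reference.  */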
849
850 /* Issue prefetches for the references in GROUPS into the loop as decided
851 before. AHEAD is the number of iterations to prefetch ahead.
852 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
853
854 static void
855 issue_prefetches (struct mem_ref_group *groups,
856 unsigned unroll_factor, unsigned ahead)
857 {
858 struct mem_ref *ref;
859
860 for (; groups; groups = groups->next)
861 for (ref = groups->refs; ref; ref = ref->next)
862 if (ref->issue_prefetch_p)
863 issue_prefetch_ref (ref, unroll_factor, ahead);
864 }
865
866 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
867 this is the case, fills in DESC with the description of the number of
868 iterations. */
869
870 static bool
871 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
872 unsigned factor)
873 {
874 if (!can_unroll_loop_p (loop, factor, desc))
875 return false;
876
877 /* We only consider loops without control flow for unrolling. This is not
878 a hard restriction -- tree_unroll_loop works with arbitrary loops
879 as well; but the unrolling/prefetching is usually more profitable for
880 loops consisting of a single basic block, and we want to limit the
881 code growth. */
882 if (loop->num_nodes > 2)
883 return false;
884
885 return true;
886 }
887
888 /* Determines the factor by which to unroll LOOP, from the information
889 contained in the list of memory references REFS. The description of the
890 number of iterations of LOOP is stored to DESC. NINSNS is the number of
891 insns of the LOOP. */
893
894 static unsigned
895 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
896 unsigned ninsns, struct tree_niter_desc *desc)
897 {
898 unsigned upper_bound;
899 unsigned nfactor, factor, mod_constraint;
900 struct mem_ref_group *agp;
901 struct mem_ref *ref;
902
903 /* First check whether the loop is not too large to unroll. We ignore
904 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
905 from unrolling them enough to make exactly one cache line covered by each
906 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
907 us from unrolling the loops too many times in cases where we only expect
908 gains from better scheduling and decreasing loop overhead, which is not
909 the case here. */
910 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
911 if (upper_bound <= 1)
912 return 1;
913
914 /* Choose the factor so that we may prefetch each cache line just once,
915 but bound the unrolling by UPPER_BOUND. */
916 factor = 1;
917 for (agp = refs; agp; agp = agp->next)
918 for (ref = agp->refs; ref; ref = ref->next)
919 if (should_issue_prefetch_p (ref))
920 {
921 mod_constraint = ref->prefetch_mod;
922 nfactor = least_common_multiple (mod_constraint, factor);
923 if (nfactor <= upper_bound)
924 factor = nfactor;
925 }
926
927 if (!should_unroll_loop_p (loop, desc, factor))
928 return 1;
929
930 return factor;
931 }
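
/* For example, if PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns gives an
   upper bound of 16 and the prefetched references have prefetch_mod values
   4 and 6, the factor grows as lcm (4, 1) = 4 and then lcm (6, 4) = 12,
   which still fits under the bound; a further reference with prefetch_mod 32
   would be ignored, since lcm (32, 12) = 96 exceeds it.  (The numbers are
   made up for illustration.)  */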
932
933 /* Issue prefetch instructions for array references in LOOP. Returns
934 true if the LOOP was unrolled. */
935
936 static bool
937 loop_prefetch_arrays (struct loop *loop)
938 {
939 struct mem_ref_group *refs;
940 unsigned ahead, ninsns, unroll_factor;
941 struct tree_niter_desc desc;
942 bool unrolled = false;
943
944 /* Step 1: gather the memory references. */
945 refs = gather_memory_references (loop);
946
947 /* Step 2: estimate the reuse effects. */
948 prune_by_reuse (refs);
949
950 if (!anything_to_prefetch_p (refs))
951 goto fail;
952
953 /* Step 3: determine the ahead and unroll factor. */
954
955 /* FIXME: We should use not the size of the loop, but the average number of
956 instructions executed per iteration of the loop. */
957 ninsns = tree_num_loop_insns (loop);
958 ahead = (PREFETCH_LATENCY + ninsns - 1) / ninsns;
959 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc);
960 if (dump_file && (dump_flags & TDF_DETAILS))
961 fprintf (dump_file, "Ahead %d, unroll factor %d\n", ahead, unroll_factor);
962
963 /* If the loop rolls less than the required unroll factor, prefetching
964 is useless. */
965 if (unroll_factor > 1
966 && cst_and_fits_in_hwi (desc.niter)
967 && (unsigned HOST_WIDE_INT) int_cst_value (desc.niter) < unroll_factor)
968 goto fail;
969
970 /* Step 4: what to prefetch? */
971 if (!schedule_prefetches (refs, unroll_factor, ahead))
972 goto fail;
973
974 /* Step 5: unroll the loop. TODO -- peeling of first and last few
975 iterations so that we do not issue superfluous prefetches. */
976 if (unroll_factor != 1)
977 {
978 tree_unroll_loop (loop, unroll_factor,
979 single_dom_exit (loop), &desc);
980 unrolled = true;
981 }
982
983 /* Step 6: issue the prefetches. */
984 issue_prefetches (refs, unroll_factor, ahead);
985
986 fail:
987 release_mem_refs (refs);
988 return unrolled;
989 }
990
991 /* Issue prefetch instructions for array references in loops. */
992
993 unsigned int
994 tree_ssa_prefetch_arrays (void)
995 {
996 loop_iterator li;
997 struct loop *loop;
998 bool unrolled = false;
999 int todo_flags = 0;
1000
1001 if (!HAVE_prefetch
1002 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1003 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1004 of the processor costs and i486 does not have prefetch, but
1005 -march=pentium4 causes HAVE_prefetch to be true. Ugh. */
1006 || PREFETCH_BLOCK == 0)
1007 return 0;
1008
1009 if (dump_file && (dump_flags & TDF_DETAILS))
1010 {
1011 fprintf (dump_file, "Prefetching parameters:\n");
1012 fprintf (dump_file, " simultaneous prefetches: %d\n",
1013 SIMULTANEOUS_PREFETCHES);
1014 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
1015 fprintf (dump_file, " L1 cache size: %d (%d bytes)\n",
1016 L1_CACHE_SIZE, L1_CACHE_SIZE * L1_CACHE_LINE_SIZE);
1017 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1018 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
1019 fprintf (dump_file, "\n");
1020 }
1021
1022 initialize_original_copy_tables ();
1023
1024 if (!built_in_decls[BUILT_IN_PREFETCH])
1025 {
1026 tree type = build_function_type (void_type_node,
1027 tree_cons (NULL_TREE,
1028 const_ptr_type_node,
1029 NULL_TREE));
1030 tree decl = add_builtin_function ("__builtin_prefetch", type,
1031 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1032 NULL, NULL_TREE);
1033 DECL_IS_NOVOPS (decl) = true;
1034 built_in_decls[BUILT_IN_PREFETCH] = decl;
1035 }
1036
1037 /* We assume that the cache line size is a power of two, so verify this
1038 here. */
1039 gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1040
1041 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1042 {
1043 if (dump_file && (dump_flags & TDF_DETAILS))
1044 fprintf (dump_file, "Processing loop %d:\n", loop->num);
1045
1046 unrolled |= loop_prefetch_arrays (loop);
1047
1048 if (dump_file && (dump_flags & TDF_DETAILS))
1049 fprintf (dump_file, "\n\n");
1050 }
1051
1052 if (unrolled)
1053 {
1054 scev_reset ();
1055 todo_flags |= TODO_cleanup_cfg;
1056 }
1057
1058 free_original_copy_tables ();
1059 return todo_flags;
1060 }