1 /* Array prefetching.
2 Copyright (C) 2005-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "tree-pass.h"
30 #include "gimple-ssa.h"
31 #include "optabs-query.h"
32 #include "tree-pretty-print.h"
33 #include "fold-const.h"
34 #include "stor-layout.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "gimplify-me.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-manip.h"
40 #include "tree-ssa-loop-niter.h"
41 #include "tree-ssa-loop.h"
42 #include "ssa.h"
43 #include "tree-into-ssa.h"
44 #include "cfgloop.h"
45 #include "tree-scalar-evolution.h"
46 #include "params.h"
47 #include "langhooks.h"
48 #include "tree-inline.h"
49 #include "tree-data-ref.h"
50 #include "diagnostic-core.h"
51 #include "dbgcnt.h"
52
53 /* This pass inserts prefetch instructions to optimize cache usage during
54 accesses to arrays in loops. It processes loops sequentially and:
55
56 1) Gathers all memory references in the single loop.
57 2) For each of the references it decides when it is profitable to prefetch
58 it. To do it, we evaluate the reuse among the accesses, and determine
59 two values: PREFETCH_BEFORE (meaning that it only makes sense to do
60 prefetching in the first PREFETCH_BEFORE iterations of the loop) and
61 PREFETCH_MOD (meaning that it only makes sense to prefetch in the
62 iterations of the loop that are zero modulo PREFETCH_MOD). For example
63 (assuming cache line size is 64 bytes, char has size 1 byte and there
64 is no hardware sequential prefetch):
65
66 char *a;
67 for (i = 0; i < max; i++)
68 {
69 a[255] = ...; (0)
70 a[i] = ...; (1)
71 a[i + 64] = ...; (2)
72 a[16*i] = ...; (3)
73 a[187*i] = ...; (4)
74 a[187*i + 50] = ...; (5)
75 }
76
77 (0) obviously has PREFETCH_BEFORE 1
78 (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
79 location 64 iterations before it, and PREFETCH_MOD 64 (since
80 it hits the same cache line otherwise).
81 (2) has PREFETCH_MOD 64
82 (3) has PREFETCH_MOD 4
83 (4) has PREFETCH_MOD 1. We do not set PREFETCH_BEFORE here, since
84 the cache line accessed by (5) is the same with probability only
85 7/32.
86 (5) has PREFETCH_MOD 1 as well.
87
88 Additionally, we use data dependence analysis to determine for each
89 reference the distance till the first reuse; this information is used
90 to determine the temporality of the issued prefetch instruction.
91
92 3) We determine how much ahead we need to prefetch. The number of
93 iterations needed is time to fetch / time spent in one iteration of
94 the loop. The problem is that we do not know either of these values,
95 so we just make a heuristic guess based on a magic (possibly)
96 target-specific constant and size of the loop (see the example below).
97
98 4) Determine which of the references we prefetch. We take into account
99 that there is a maximum number of simultaneous prefetches (provided
100 by the machine description). We issue as many prefetches as possible
101 while still within this bound (starting with those with lowest
102 prefetch_mod, since they are responsible for most of the cache
103 misses).
104
105 5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
106 and PREFETCH_BEFORE requirements (within some bounds), and to avoid
107 prefetching nonaccessed memory.
108 TODO -- actually implement peeling.
109
110 6) We actually emit the prefetch instructions. ??? Perhaps emit the
111 prefetch instructions with guards in cases where 5) was not sufficient
112 to satisfy the constraints?
113
114 A cost model is implemented to determine whether or not prefetching is
115 profitable for a given loop. The cost model has three heuristics:
116
117 1. Function trip_count_to_ahead_ratio_too_small_p implements a
118 heuristic that determines whether or not the loop has too few
119 iterations (compared to ahead). Prefetching is not likely to be
120 beneficial if the trip count to ahead ratio is below a certain
121 minimum.
122
123 2. Function mem_ref_count_reasonable_p implements a heuristic that
124 determines whether the given loop has enough CPU ops that can be
125 overlapped with cache missing memory ops. If not, the loop
126 won't benefit from prefetching. In the implementation,
127 prefetching is not considered beneficial if the ratio between
128 the instruction count and the mem ref count is below a certain
129 minimum.
130
131 3. Function insn_to_prefetch_ratio_too_small_p implements a
132 heuristic that disables prefetching in a loop if the prefetching
133 cost is above a certain limit. The relative prefetching cost is
134 estimated by taking the ratio between the prefetch count and the
135 total instruction count (this models the I-cache cost).
136
137 The limits used in these heuristics are defined as parameters with
138 reasonable default values. Machine-specific default values will be
139 added later.
140
141 Some other TODO:
142 -- write and use more general reuse analysis (that could also be used
143 in other cache-aimed loop optimizations)
144 -- make it behave sanely together with the prefetches given by the user
145 (now we just ignore them; at the very least we should avoid
146 optimizing loops in which the user put his own prefetches)
147 -- we assume cache line size alignment of arrays; this could be
148 improved. */
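
/* As an illustration of 3) above: if fetching a cache line is assumed to take
   roughly 200 cycles and one iteration of the loop roughly 50 cycles,
   prefetches have to be issued about 200 / 50 = 4 iterations ahead of the
   iteration that uses the data.  These numbers are only an example; the pass
   derives the actual values from the (possibly target-specific) constant and
   the loop size mentioned in 3).  */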
149
150 /* Magic constants follow. These should be replaced by machine specific
151 numbers. */
152
153 /* True if write can be prefetched by a read prefetch. */
154
155 #ifndef WRITE_CAN_USE_READ_PREFETCH
156 #define WRITE_CAN_USE_READ_PREFETCH 1
157 #endif
158
159 /* True if read can be prefetched by a write prefetch. */
160
161 #ifndef READ_CAN_USE_WRITE_PREFETCH
162 #define READ_CAN_USE_WRITE_PREFETCH 0
163 #endif
164
165 /* The size of the block loaded by a single prefetch. Usually, this is
166 the same as cache line size (at the moment, we only consider one level
167 of cache hierarchy). */
168
169 #ifndef PREFETCH_BLOCK
170 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
171 #endif
172
173 /* Do we have forward hardware sequential prefetching? */
174
175 #ifndef HAVE_FORWARD_PREFETCH
176 #define HAVE_FORWARD_PREFETCH 0
177 #endif
178
179 /* Do we have backward hardware sequential prefetching? */
180
181 #ifndef HAVE_BACKWARD_PREFETCH
182 #define HAVE_BACKWARD_PREFETCH 0
183 #endif
184
185 /* In some cases we are only able to determine that there is a certain
186 probability that the two accesses hit the same cache line. In this
187 case, we issue the prefetches for both of them if this probability
188 is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand. */
189
190 #ifndef ACCEPTABLE_MISS_RATE
191 #define ACCEPTABLE_MISS_RATE 50
192 #endif
193
194 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
195 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
196
197 /* We consider a memory access nontemporal if it is not reused sooner than
198 after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore
199 accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
200 so that we use nontemporal prefetches e.g. if a single memory location
201 is accessed several times in a single iteration of the loop. */
202 #define NONTEMPORAL_FRACTION 16
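
/* For instance, assuming a 32 kB L1 cache and a 512 kB L2 cache, a reference
   is treated as nontemporal only if at least 512 kB of other data is accessed
   before its first reuse, while reuses within 32768 / 16 = 2 kB of accessed
   data are ignored.  The cache sizes are just an example; the real bounds
   follow from L1_CACHE_SIZE_BYTES and L2_CACHE_SIZE_BYTES above.  */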
203
204 /* In case we have to emit a memory fence instruction after the loop that
205 uses nontemporal stores, this defines the builtin to use. */
206
207 #ifndef FENCE_FOLLOWING_MOVNT
208 #define FENCE_FOLLOWING_MOVNT NULL_TREE
209 #endif
210
211 /* It is not profitable to prefetch when the trip count is not at
212 least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
213 For example, in a loop with a prefetch ahead distance of 10,
214 supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
215 profitable to prefetch when the trip count is greater than or equal to
216 40. In that case, 30 out of the 40 iterations will benefit from
217 prefetching. */
218
219 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
220 #define TRIP_COUNT_TO_AHEAD_RATIO 4
221 #endif
222
223 /* The group of references between which reuse may occur. */
224
225 struct mem_ref_group
226 {
227 tree base; /* Base of the reference. */
228 tree step; /* Step of the reference. */
229 struct mem_ref *refs; /* References in the group. */
230 struct mem_ref_group *next; /* Next group of references. */
231 unsigned int uid; /* Group UID, used only for debugging. */
232 };
233
234 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched. */
235
236 #define PREFETCH_ALL HOST_WIDE_INT_M1U
237
238 /* Do not generate a prefetch if the unroll factor is significantly less
239 than what is required by the prefetch. This is to avoid redundant
240 prefetches. For example, when prefetch_mod is 16 and unroll_factor is
241 2, prefetching requires unrolling the loop 16 times, but
242 the loop is actually unrolled twice. In this case (ratio = 8),
243 prefetching is not likely to be beneficial. */
244
245 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
246 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
247 #endif
248
249 /* Some of the prefetch computations have quadratic complexity. We want to
250 avoid huge compile times and, therefore, want to limit the amount of
251 memory references per loop where we consider prefetching. */
252
253 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
254 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
255 #endif
256
257 /* The memory reference. */
258
259 struct mem_ref
260 {
261 gimple *stmt; /* Statement in which the reference appears. */
262 tree mem; /* The reference. */
263 HOST_WIDE_INT delta; /* Constant offset of the reference. */
264 struct mem_ref_group *group; /* The group of references it belongs to. */
265 unsigned HOST_WIDE_INT prefetch_mod;
266 /* Prefetch only each PREFETCH_MOD-th
267 iteration. */
268 unsigned HOST_WIDE_INT prefetch_before;
269 /* Prefetch only first PREFETCH_BEFORE
270 iterations. */
271 unsigned reuse_distance; /* The amount of data accessed before the first
272 reuse of this value. */
273 struct mem_ref *next; /* The next reference in the group. */
274 unsigned int uid; /* Ref UID, used only for debugging. */
275 unsigned write_p : 1; /* Is it a write? */
276 unsigned independent_p : 1; /* True if the reference is independent of
277 all other references inside the loop. */
278 unsigned issue_prefetch_p : 1; /* Should we really issue the prefetch? */
279 unsigned storent_p : 1; /* True if we changed the store to a
280 nontemporal one. */
281 };
282
283 /* Dumps information about a memory reference. */
284 static void
285 dump_mem_details (FILE *file, tree base, tree step,
286 HOST_WIDE_INT delta, bool write_p)
287 {
288 fprintf (file, "(base ");
289 print_generic_expr (file, base, TDF_SLIM);
290 fprintf (file, ", step ");
291 if (cst_and_fits_in_hwi (step))
292 fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
293 else
294 print_generic_expr (file, step, TDF_SLIM);
295 fprintf (file, ")\n");
296 fprintf (file, " delta " HOST_WIDE_INT_PRINT_DEC "\n", delta);
297 fprintf (file, " %s\n\n", write_p ? "write" : "read");
298 }
299
300 /* Dumps information about reference REF to FILE. */
301
302 static void
303 dump_mem_ref (FILE *file, struct mem_ref *ref)
304 {
305 fprintf (file, "reference %u:%u (", ref->group->uid, ref->uid);
306 print_generic_expr (file, ref->mem, TDF_SLIM);
307 fprintf (file, ")\n");
308 }
309
310 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
311 exist. */
312
313 static struct mem_ref_group *
314 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
315 {
316 /* Global count for setting struct mem_ref_group->uid. */
317 static unsigned int last_mem_ref_group_uid = 0;
318
319 struct mem_ref_group *group;
320
321 for (; *groups; groups = &(*groups)->next)
322 {
323 if (operand_equal_p ((*groups)->step, step, 0)
324 && operand_equal_p ((*groups)->base, base, 0))
325 return *groups;
326
327 /* If step is an integer constant, keep the list of groups sorted
328 by decreasing step. */
329 if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
330 && int_cst_value ((*groups)->step) < int_cst_value (step))
331 break;
332 }
333
334 group = XNEW (struct mem_ref_group);
335 group->base = base;
336 group->step = step;
337 group->refs = NULL;
338 group->uid = ++last_mem_ref_group_uid;
339 group->next = *groups;
340 *groups = group;
341
342 return group;
343 }
344
345 /* Records a memory reference MEM in GROUP with offset DELTA and write status
346 WRITE_P. The reference occurs in statement STMT. */
347
348 static void
349 record_ref (struct mem_ref_group *group, gimple *stmt, tree mem,
350 HOST_WIDE_INT delta, bool write_p)
351 {
352 unsigned int last_mem_ref_uid = 0;
353 struct mem_ref **aref;
354
355 /* Do not record the same address twice. */
356 for (aref = &group->refs; *aref; aref = &(*aref)->next)
357 {
358 last_mem_ref_uid = (*aref)->uid;
359
360 /* It need not be possible for a write reference to reuse a read
361 prefetch, or vice versa. */
362 if (!WRITE_CAN_USE_READ_PREFETCH
363 && write_p
364 && !(*aref)->write_p)
365 continue;
366 if (!READ_CAN_USE_WRITE_PREFETCH
367 && !write_p
368 && (*aref)->write_p)
369 continue;
370
371 if ((*aref)->delta == delta)
372 return;
373 }
374
375 (*aref) = XNEW (struct mem_ref);
376 (*aref)->stmt = stmt;
377 (*aref)->mem = mem;
378 (*aref)->delta = delta;
379 (*aref)->write_p = write_p;
380 (*aref)->prefetch_before = PREFETCH_ALL;
381 (*aref)->prefetch_mod = 1;
382 (*aref)->reuse_distance = 0;
383 (*aref)->issue_prefetch_p = false;
384 (*aref)->group = group;
385 (*aref)->next = NULL;
386 (*aref)->independent_p = false;
387 (*aref)->storent_p = false;
388 (*aref)->uid = last_mem_ref_uid + 1;
389
390 if (dump_file && (dump_flags & TDF_DETAILS))
391 {
392 dump_mem_ref (dump_file, *aref);
393
394 fprintf (dump_file, " group %u ", group->uid);
395 dump_mem_details (dump_file, group->base, group->step, delta,
396 write_p);
397 }
398 }
399
400 /* Release memory references in GROUPS. */
401
402 static void
403 release_mem_refs (struct mem_ref_group *groups)
404 {
405 struct mem_ref_group *next_g;
406 struct mem_ref *ref, *next_r;
407
408 for (; groups; groups = next_g)
409 {
410 next_g = groups->next;
411 for (ref = groups->refs; ref; ref = next_r)
412 {
413 next_r = ref->next;
414 free (ref);
415 }
416 free (groups);
417 }
418 }
419
420 /* A structure used to pass arguments to idx_analyze_ref. */
421
422 struct ar_data
423 {
424 class loop *loop; /* Loop of the reference. */
425 gimple *stmt; /* Statement of the reference. */
426 tree *step; /* Step of the memory reference. */
427 HOST_WIDE_INT *delta; /* Offset of the memory reference. */
428 };
429
430 /* Analyzes a single INDEX of a memory reference to obtain information
431 described in analyze_ref. Callback for for_each_index. */
432
433 static bool
434 idx_analyze_ref (tree base, tree *index, void *data)
435 {
436 struct ar_data *ar_data = (struct ar_data *) data;
437 tree ibase, step, stepsize;
438 HOST_WIDE_INT idelta = 0, imult = 1;
439 affine_iv iv;
440
441 if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
442 *index, &iv, true))
443 return false;
444 ibase = iv.base;
445 step = iv.step;
446
447 if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
448 && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
449 {
450 idelta = int_cst_value (TREE_OPERAND (ibase, 1));
451 ibase = TREE_OPERAND (ibase, 0);
452 }
453 if (cst_and_fits_in_hwi (ibase))
454 {
455 idelta += int_cst_value (ibase);
456 ibase = build_int_cst (TREE_TYPE (ibase), 0);
457 }
458
459 if (TREE_CODE (base) == ARRAY_REF)
460 {
461 stepsize = array_ref_element_size (base);
462 if (!cst_and_fits_in_hwi (stepsize))
463 return false;
464 imult = int_cst_value (stepsize);
465 step = fold_build2 (MULT_EXPR, sizetype,
466 fold_convert (sizetype, step),
467 fold_convert (sizetype, stepsize));
468 idelta *= imult;
469 }
470
471 if (*ar_data->step == NULL_TREE)
472 *ar_data->step = step;
473 else
474 *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
475 fold_convert (sizetype, *ar_data->step),
476 fold_convert (sizetype, step));
477 *ar_data->delta += idelta;
478 *index = ibase;
479
480 return true;
481 }
482
483 /* Tries to express REF_P in the shape &BASE + STEP * iter + DELTA, where DELTA
484 and STEP are integer constants and iter is the number of iterations of LOOP. The
485 reference occurs in statement STMT. Strips nonaddressable component
486 references from REF_P. */
487
488 static bool
489 analyze_ref (class loop *loop, tree *ref_p, tree *base,
490 tree *step, HOST_WIDE_INT *delta,
491 gimple *stmt)
492 {
493 struct ar_data ar_data;
494 tree off;
495 HOST_WIDE_INT bit_offset;
496 tree ref = *ref_p;
497
498 *step = NULL_TREE;
499 *delta = 0;
500
501 /* First strip off the component references. Ignore bitfields.
502 Also strip off the real and imaginary parts of a complex, so that
503 they can have the same base. */
504 if (TREE_CODE (ref) == REALPART_EXPR
505 || TREE_CODE (ref) == IMAGPART_EXPR
506 || (TREE_CODE (ref) == COMPONENT_REF
507 && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
508 {
509 if (TREE_CODE (ref) == IMAGPART_EXPR)
510 *delta += int_size_in_bytes (TREE_TYPE (ref));
511 ref = TREE_OPERAND (ref, 0);
512 }
513
514 *ref_p = ref;
515
516 for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
517 {
518 off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
519 bit_offset = TREE_INT_CST_LOW (off);
520 gcc_assert (bit_offset % BITS_PER_UNIT == 0);
521
522 *delta += bit_offset / BITS_PER_UNIT;
523 }
524
525 *base = unshare_expr (ref);
526 ar_data.loop = loop;
527 ar_data.stmt = stmt;
528 ar_data.step = step;
529 ar_data.delta = delta;
530 return for_each_index (base, idx_analyze_ref, &ar_data);
531 }
532
533 /* Record a memory reference REF to the list REFS. The reference occurs in
534 LOOP in statement STMT and it is a write if WRITE_P. Returns true if the
535 reference was recorded, false otherwise. */
536
537 static bool
538 gather_memory_references_ref (class loop *loop, struct mem_ref_group **refs,
539 tree ref, bool write_p, gimple *stmt)
540 {
541 tree base, step;
542 HOST_WIDE_INT delta;
543 struct mem_ref_group *agrp;
544
545 if (get_base_address (ref) == NULL)
546 return false;
547
548 if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
549 return false;
550 /* If analyze_ref fails the default is a NULL_TREE. We can stop here. */
551 if (step == NULL_TREE)
552 return false;
553
554 /* Stop if the address of BASE could not be taken. */
555 if (may_be_nonaddressable_p (base))
556 return false;
557
558 /* Limit non-constant step prefetching only to the innermost loops and
559 only when the step is loop invariant in the entire loop nest. */
560 if (!cst_and_fits_in_hwi (step))
561 {
562 if (loop->inner != NULL)
563 {
564 if (dump_file && (dump_flags & TDF_DETAILS))
565 {
566 fprintf (dump_file, "Memory expression %p\n", (void *) ref);
567 print_generic_expr (dump_file, ref, TDF_SLIM);
568 fprintf (dump_file, ":");
569 dump_mem_details (dump_file, base, step, delta, write_p);
570 fprintf (dump_file,
571 "Ignoring %p, non-constant step prefetching is "
572 "limited to inner most loops \n",
573 (void *) ref);
574 }
575 return false;
576 }
577 else
578 {
579 if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
580 {
581 if (dump_file && (dump_flags & TDF_DETAILS))
582 {
583 fprintf (dump_file, "Memory expression %p\n", (void *) ref);
584 print_generic_expr (dump_file, ref, TDF_SLIM);
585 fprintf (dump_file, ":");
586 dump_mem_details (dump_file, base, step, delta, write_p);
587 fprintf (dump_file,
588 "Not prefetching, ignoring %p due to "
589 "loop variant step\n",
590 (void *) ref);
591 }
592 return false;
593 }
594 }
595 }
596
597 /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
598 are integer constants. */
599 agrp = find_or_create_group (refs, base, step);
600 record_ref (agrp, stmt, ref, delta, write_p);
601
602 return true;
603 }
604
605 /* Record the suitable memory references in LOOP. NO_OTHER_REFS is set to
606 true if there are no other memory references inside the loop. */
607
608 static struct mem_ref_group *
609 gather_memory_references (class loop *loop, bool *no_other_refs, unsigned *ref_count)
610 {
611 basic_block *body = get_loop_body_in_dom_order (loop);
612 basic_block bb;
613 unsigned i;
614 gimple_stmt_iterator bsi;
615 gimple *stmt;
616 tree lhs, rhs;
617 struct mem_ref_group *refs = NULL;
618
619 *no_other_refs = true;
620 *ref_count = 0;
621
622 /* Scan the loop body in order, so that the former references precede the
623 later ones. */
624 for (i = 0; i < loop->num_nodes; i++)
625 {
626 bb = body[i];
627 if (bb->loop_father != loop)
628 continue;
629
630 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
631 {
632 stmt = gsi_stmt (bsi);
633
634 if (gimple_code (stmt) != GIMPLE_ASSIGN)
635 {
636 if (gimple_vuse (stmt)
637 || (is_gimple_call (stmt)
638 && !(gimple_call_flags (stmt) & ECF_CONST)))
639 *no_other_refs = false;
640 continue;
641 }
642
643 if (! gimple_vuse (stmt))
644 continue;
645
646 lhs = gimple_assign_lhs (stmt);
647 rhs = gimple_assign_rhs1 (stmt);
648
649 if (REFERENCE_CLASS_P (rhs))
650 {
651 *no_other_refs &= gather_memory_references_ref (loop, &refs,
652 rhs, false, stmt);
653 *ref_count += 1;
654 }
655 if (REFERENCE_CLASS_P (lhs))
656 {
657 *no_other_refs &= gather_memory_references_ref (loop, &refs,
658 lhs, true, stmt);
659 *ref_count += 1;
660 }
661 }
662 }
663 free (body);
664
665 return refs;
666 }
667
668 /* Prune the prefetch candidate REF using the self-reuse. */
669
670 static void
671 prune_ref_by_self_reuse (struct mem_ref *ref)
672 {
673 HOST_WIDE_INT step;
674 bool backward;
675
676 /* If the step size is non-constant, we cannot calculate prefetch_mod. */
677 if (!cst_and_fits_in_hwi (ref->group->step))
678 return;
679
680 step = int_cst_value (ref->group->step);
681
682 backward = step < 0;
683
684 if (step == 0)
685 {
686 /* Prefetch references to invariant address just once. */
687 ref->prefetch_before = 1;
688 return;
689 }
690
691 if (backward)
692 step = -step;
693
694 if (step > PREFETCH_BLOCK)
695 return;
696
697 if ((backward && HAVE_BACKWARD_PREFETCH)
698 || (!backward && HAVE_FORWARD_PREFETCH))
699 {
700 ref->prefetch_before = 1;
701 return;
702 }
703
704 ref->prefetch_mod = PREFETCH_BLOCK / step;
705 }
706
707 /* Divides X by BY, rounding down. */
708
709 static HOST_WIDE_INT
710 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
711 {
712 gcc_assert (by > 0);
713
714 if (x >= 0)
715 return x / (HOST_WIDE_INT) by;
716 else
717 return (x + (HOST_WIDE_INT) by - 1) / (HOST_WIDE_INT) by;
718 }
719
720 /* Given a CACHE_LINE_SIZE and two inductive memory references
721 with a common STEP greater than CACHE_LINE_SIZE and an address
722 difference DELTA, compute the probability that they will fall
723 in different cache lines. Return true if the computed miss rate
724 is not greater than the ACCEPTABLE_MISS_RATE. DISTINCT_ITERS is the
725 number of distinct iterations after which the pattern repeats itself.
726 ALIGN_UNIT is the unit of alignment in bytes. */
727
728 static bool
729 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
730 HOST_WIDE_INT step, HOST_WIDE_INT delta,
731 unsigned HOST_WIDE_INT distinct_iters,
732 int align_unit)
733 {
734 unsigned align, iter;
735 int total_positions, miss_positions, max_allowed_miss_positions;
736 int address1, address2, cache_line1, cache_line2;
737
738 /* It always misses if delta is greater than or equal to the cache
739 line size. */
740 if (delta >= (HOST_WIDE_INT) cache_line_size)
741 return false;
742
743 miss_positions = 0;
744 total_positions = (cache_line_size / align_unit) * distinct_iters;
745 max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
746
747 /* Iterate through all possible alignments of the first
748 memory reference within its cache line. */
749 for (align = 0; align < cache_line_size; align += align_unit)
750
751 /* Iterate through all distinct iterations. */
752 for (iter = 0; iter < distinct_iters; iter++)
753 {
754 address1 = align + step * iter;
755 address2 = address1 + delta;
756 cache_line1 = address1 / cache_line_size;
757 cache_line2 = address2 / cache_line_size;
758 if (cache_line1 != cache_line2)
759 {
760 miss_positions += 1;
761 if (miss_positions > max_allowed_miss_positions)
762 return false;
763 }
764 }
765 return true;
766 }
767
768 /* Prune the prefetch candidate REF using the reuse with BY.
769 If BY_IS_BEFORE is true, BY is before REF in the loop. */
770
771 static void
772 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
773 bool by_is_before)
774 {
775 HOST_WIDE_INT step;
776 bool backward;
777 HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
778 HOST_WIDE_INT delta = delta_b - delta_r;
779 HOST_WIDE_INT hit_from;
780 unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
781 HOST_WIDE_INT reduced_step;
782 unsigned HOST_WIDE_INT reduced_prefetch_block;
783 tree ref_type;
784 int align_unit;
785
786 /* If the step is non-constant, we cannot calculate prefetch_before. */
787 if (!cst_and_fits_in_hwi (ref->group->step)) {
788 return;
789 }
790
791 step = int_cst_value (ref->group->step);
792
793 backward = step < 0;
794
795
796 if (delta == 0)
797 {
798 /* If the references have the same address, only prefetch the
799 former. */
800 if (by_is_before)
801 ref->prefetch_before = 0;
802
803 return;
804 }
805
806 if (!step)
807 {
808 /* If the reference addresses are invariant and fall into the
809 same cache line, prefetch just the first one. */
810 if (!by_is_before)
811 return;
812
813 if (ddown (ref->delta, PREFETCH_BLOCK)
814 != ddown (by->delta, PREFETCH_BLOCK))
815 return;
816
817 ref->prefetch_before = 0;
818 return;
819 }
820
821 /* Only prune the reference that is behind in the array. */
822 if (backward)
823 {
824 if (delta > 0)
825 return;
826
827 /* Transform the data so that we may assume that the accesses
828 are forward. */
829 delta = - delta;
830 step = -step;
831 delta_r = PREFETCH_BLOCK - 1 - delta_r;
832 delta_b = PREFETCH_BLOCK - 1 - delta_b;
833 }
834 else
835 {
836 if (delta < 0)
837 return;
838 }
839
840 /* Check whether the two references are likely to hit the same cache
841 line, and how distant the iterations in which it occurs are from
842 each other. */
843
844 if (step <= PREFETCH_BLOCK)
845 {
846 /* The accesses are sure to meet. Let us check when. */
847 hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
848 prefetch_before = (hit_from - delta_r + step - 1) / step;
849
850 /* Do not reduce prefetch_before if we meet beyond cache size. */
851 if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
852 prefetch_before = PREFETCH_ALL;
853 if (prefetch_before < ref->prefetch_before)
854 ref->prefetch_before = prefetch_before;
855
856 return;
857 }
858
859 /* A more complicated case with step > prefetch_block. First reduce
860 the ratio between the step and the cache line size to its simplest
861 terms. The resulting denominator will then represent the number of
862 distinct iterations after which each address will go back to its
863 initial location within the cache line. This computation assumes
864 that PREFETCH_BLOCK is a power of two. */
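  /* For example, assuming a step of 96 and a PREFETCH_BLOCK of 64, the loop
     below ends with reduced_step 3 and reduced_prefetch_block 2, i.e. the
     alignment pattern repeats after two distinct iterations.  */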
865 prefetch_block = PREFETCH_BLOCK;
866 reduced_prefetch_block = prefetch_block;
867 reduced_step = step;
868 while ((reduced_step & 1) == 0
869 && reduced_prefetch_block > 1)
870 {
871 reduced_step >>= 1;
872 reduced_prefetch_block >>= 1;
873 }
874
875 prefetch_before = delta / step;
876 delta %= step;
877 ref_type = TREE_TYPE (ref->mem);
878 align_unit = TYPE_ALIGN (ref_type) / 8;
879 if (is_miss_rate_acceptable (prefetch_block, step, delta,
880 reduced_prefetch_block, align_unit))
881 {
882 /* Do not reduce prefetch_before if we meet beyond cache size. */
883 if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
884 prefetch_before = PREFETCH_ALL;
885 if (prefetch_before < ref->prefetch_before)
886 ref->prefetch_before = prefetch_before;
887
888 return;
889 }
890
891 /* Try also the following iteration. */
892 prefetch_before++;
893 delta = step - delta;
894 if (is_miss_rate_acceptable (prefetch_block, step, delta,
895 reduced_prefetch_block, align_unit))
896 {
897 if (prefetch_before < ref->prefetch_before)
898 ref->prefetch_before = prefetch_before;
899
900 return;
901 }
902
903 /* The ref probably does not reuse BY. */
904 return;
905 }
906
907 /* Prune the prefetch candidate REF using the reuses with other references
908 in REFS. */
909
910 static void
911 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
912 {
913 struct mem_ref *prune_by;
914 bool before = true;
915
916 prune_ref_by_self_reuse (ref);
917
918 for (prune_by = refs; prune_by; prune_by = prune_by->next)
919 {
920 if (prune_by == ref)
921 {
922 before = false;
923 continue;
924 }
925
926 if (!WRITE_CAN_USE_READ_PREFETCH
927 && ref->write_p
928 && !prune_by->write_p)
929 continue;
930 if (!READ_CAN_USE_WRITE_PREFETCH
931 && !ref->write_p
932 && prune_by->write_p)
933 continue;
934
935 prune_ref_by_group_reuse (ref, prune_by, before);
936 }
937 }
938
939 /* Prune the prefetch candidates in GROUP using the reuse analysis. */
940
941 static void
942 prune_group_by_reuse (struct mem_ref_group *group)
943 {
944 struct mem_ref *ref_pruned;
945
946 for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
947 {
948 prune_ref_by_reuse (ref_pruned, group->refs);
949
950 if (dump_file && (dump_flags & TDF_DETAILS))
951 {
952 dump_mem_ref (dump_file, ref_pruned);
953
954 if (ref_pruned->prefetch_before == PREFETCH_ALL
955 && ref_pruned->prefetch_mod == 1)
956 fprintf (dump_file, " no restrictions");
957 else if (ref_pruned->prefetch_before == 0)
958 fprintf (dump_file, " do not prefetch");
959 else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
960 fprintf (dump_file, " prefetch once");
961 else
962 {
963 if (ref_pruned->prefetch_before != PREFETCH_ALL)
964 {
965 fprintf (dump_file, " prefetch before ");
966 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
967 ref_pruned->prefetch_before);
968 }
969 if (ref_pruned->prefetch_mod != 1)
970 {
971 fprintf (dump_file, " prefetch mod ");
972 fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
973 ref_pruned->prefetch_mod);
974 }
975 }
976 fprintf (dump_file, "\n");
977 }
978 }
979 }
980
981 /* Prune the list of prefetch candidates GROUPS using the reuse analysis. */
982
983 static void
984 prune_by_reuse (struct mem_ref_group *groups)
985 {
986 for (; groups; groups = groups->next)
987 prune_group_by_reuse (groups);
988 }
989
990 /* Returns true if we should issue prefetch for REF. */
991
992 static bool
993 should_issue_prefetch_p (struct mem_ref *ref)
994 {
995 /* Do we want to issue prefetches for non-constant strides? */
996 if (!cst_and_fits_in_hwi (ref->group->step) && PREFETCH_DYNAMIC_STRIDES == 0)
997 {
998 if (dump_file && (dump_flags & TDF_DETAILS))
999 fprintf (dump_file,
1000 "Skipping non-constant step for reference %u:%u\n",
1001 ref->group->uid, ref->uid);
1002 return false;
1003 }
1004
1005 /* Some processors may have a hardware prefetcher that may conflict with
1006 prefetch hints for a range of strides. Make sure we don't issue
1007 prefetches for such cases if the stride is within this particular
1008 range. */
1009 if (cst_and_fits_in_hwi (ref->group->step)
1010 && abs_hwi (int_cst_value (ref->group->step))
1011 < (HOST_WIDE_INT) PREFETCH_MINIMUM_STRIDE)
1012 {
1013 if (dump_file && (dump_flags & TDF_DETAILS))
1014 fprintf (dump_file,
1015 "Step for reference %u:%u (" HOST_WIDE_INT_PRINT_DEC
1016 ") is less than the mininum required stride of %d\n",
1017 ref->group->uid, ref->uid, int_cst_value (ref->group->step),
1018 PREFETCH_MINIMUM_STRIDE);
1019 return false;
1020 }
1021
1022 /* For now do not issue prefetches for only the first few of the
1023 iterations. */
1024 if (ref->prefetch_before != PREFETCH_ALL)
1025 {
1026 if (dump_file && (dump_flags & TDF_DETAILS))
1027 fprintf (dump_file, "Ignoring reference %u:%u due to prefetch_before\n",
1028 ref->group->uid, ref->uid);
1029 return false;
1030 }
1031
1032 /* Do not prefetch nontemporal stores. */
1033 if (ref->storent_p)
1034 {
1035 if (dump_file && (dump_flags & TDF_DETAILS))
1036 fprintf (dump_file, "Ignoring nontemporal store reference %u:%u\n", ref->group->uid, ref->uid);
1037 return false;
1038 }
1039
1040 return true;
1041 }
1042
1043 /* Decide which of the prefetch candidates in GROUPS to prefetch.
1044 AHEAD is the number of iterations to prefetch ahead (which corresponds
1045 to the number of simultaneous instances of one prefetch running at a
1046 time). UNROLL_FACTOR is the factor by which the loop is going to be
1047 unrolled. Returns true if there is anything to prefetch. */
1048
1049 static bool
1050 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
1051 unsigned ahead)
1052 {
1053 unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
1054 unsigned slots_per_prefetch;
1055 struct mem_ref *ref;
1056 bool any = false;
1057
1058 /* At most SIMULTANEOUS_PREFETCHES should be running at the same time. */
1059 remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
1060
1061 /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1062 AHEAD / UNROLL_FACTOR iterations of the unrolled loop. In each iteration,
1063 it will need a prefetch slot. */
1064 slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
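  /* For instance, with AHEAD 8 and UNROLL_FACTOR 2 this gives
     (8 + 1) / 2 = 4 slots, i.e. each prefetch instruction occupies about
     four of the SIMULTANEOUS_PREFETCHES slots; the numbers are only an
     illustration.  */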
1065 if (dump_file && (dump_flags & TDF_DETAILS))
1066 fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
1067 slots_per_prefetch);
1068
1069 /* For now we just take memory references one by one and issue
1070 prefetches for as many as possible. The groups are sorted
1071 starting with the largest step, since the references with
1072 large step are more likely to cause many cache misses. */
1073
1074 for (; groups; groups = groups->next)
1075 for (ref = groups->refs; ref; ref = ref->next)
1076 {
1077 if (!should_issue_prefetch_p (ref))
1078 continue;
1079
1080 /* The loop is far from being sufficiently unrolled for this
1081 prefetch. Do not generate the prefetch to avoid many redundant
1082 prefetches. */
1083 if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1084 continue;
1085
1086 /* If we need to prefetch the reference each PREFETCH_MOD iterations,
1087 and we unroll the loop UNROLL_FACTOR times, we need to insert
1088 ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1089 iteration. */
1090 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1091 / ref->prefetch_mod);
1092 prefetch_slots = n_prefetches * slots_per_prefetch;
1093
1094 /* If more than half of the prefetches would be lost anyway, do not
1095 issue the prefetch. */
1096 if (2 * remaining_prefetch_slots < prefetch_slots)
1097 continue;
1098
1099 /* Stop prefetching if debug counter is activated. */
1100 if (!dbg_cnt (prefetch))
1101 continue;
1102
1103 ref->issue_prefetch_p = true;
1104 if (dump_file && (dump_flags & TDF_DETAILS))
1105 fprintf (dump_file, "Decided to issue prefetch for reference %u:%u\n",
1106 ref->group->uid, ref->uid);
1107
1108 if (remaining_prefetch_slots <= prefetch_slots)
1109 return true;
1110 remaining_prefetch_slots -= prefetch_slots;
1111 any = true;
1112 }
1113
1114 return any;
1115 }
1116
1117 /* Return TRUE if no prefetch is going to be generated in the given
1118 GROUPS. */
1119
1120 static bool
1121 nothing_to_prefetch_p (struct mem_ref_group *groups)
1122 {
1123 struct mem_ref *ref;
1124
1125 for (; groups; groups = groups->next)
1126 for (ref = groups->refs; ref; ref = ref->next)
1127 if (should_issue_prefetch_p (ref))
1128 return false;
1129
1130 return true;
1131 }
1132
1133 /* Estimate the number of prefetches in the given GROUPS.
1134 UNROLL_FACTOR is the factor by which LOOP was unrolled. */
1135
1136 static int
1137 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1138 {
1139 struct mem_ref *ref;
1140 unsigned n_prefetches;
1141 int prefetch_count = 0;
1142
1143 for (; groups; groups = groups->next)
1144 for (ref = groups->refs; ref; ref = ref->next)
1145 if (should_issue_prefetch_p (ref))
1146 {
1147 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1148 / ref->prefetch_mod);
1149 prefetch_count += n_prefetches;
1150 }
1151
1152 return prefetch_count;
1153 }
1154
1155 /* Issue prefetches for the reference REF into the loop as decided before.
1156 AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
1157 is the factor by which LOOP was unrolled. */
1158
1159 static void
1160 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1161 {
1162 HOST_WIDE_INT delta;
1163 tree addr, addr_base, write_p, local, forward;
1164 gcall *prefetch;
1165 gimple_stmt_iterator bsi;
1166 unsigned n_prefetches, ap;
1167 bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1168
1169 if (dump_file && (dump_flags & TDF_DETAILS))
1170 fprintf (dump_file, "Issued%s prefetch for reference %u:%u.\n",
1171 nontemporal ? " nontemporal" : "",
1172 ref->group->uid, ref->uid);
1173
1174 bsi = gsi_for_stmt (ref->stmt);
1175
1176 n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1177 / ref->prefetch_mod);
1178 addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1179 addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1180 true, NULL, true, GSI_SAME_STMT);
1181 write_p = ref->write_p ? integer_one_node : integer_zero_node;
1182 local = nontemporal ? integer_zero_node : integer_three_node;
1183
1184 for (ap = 0; ap < n_prefetches; ap++)
1185 {
1186 if (cst_and_fits_in_hwi (ref->group->step))
1187 {
1188 /* Determine the address to prefetch. */
1189 delta = (ahead + ap * ref->prefetch_mod) *
1190 int_cst_value (ref->group->step);
1191 addr = fold_build_pointer_plus_hwi (addr_base, delta);
1192 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1193 NULL, true, GSI_SAME_STMT);
1194 }
1195 else
1196 {
1197 /* The step size is non-constant but loop-invariant. We use the
1198 heuristic of simply prefetching AHEAD iterations ahead. */
1199 forward = fold_build2 (MULT_EXPR, sizetype,
1200 fold_convert (sizetype, ref->group->step),
1201 fold_convert (sizetype, size_int (ahead)));
1202 addr = fold_build_pointer_plus (addr_base, forward);
1203 addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1204 NULL, true, GSI_SAME_STMT);
1205 }
1206
1207 if (addr_base != addr
1208 && TREE_CODE (addr_base) == SSA_NAME
1209 && TREE_CODE (addr) == SSA_NAME)
1210 {
1211 duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base));
1212 /* As this isn't a plain copy we have to reset alignment
1213 information. */
1214 if (SSA_NAME_PTR_INFO (addr))
1215 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr));
1216 }
1217
1218 /* Create the prefetch instruction. */
1219 prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
1220 3, addr, write_p, local);
1221 gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1222 }
1223 }
1224
1225 /* Issue prefetches for the references in GROUPS into the loop as decided
1226 before. AHEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
1227 is the factor by which LOOP was unrolled. */
1228
1229 static void
1230 issue_prefetches (struct mem_ref_group *groups,
1231 unsigned unroll_factor, unsigned ahead)
1232 {
1233 struct mem_ref *ref;
1234
1235 for (; groups; groups = groups->next)
1236 for (ref = groups->refs; ref; ref = ref->next)
1237 if (ref->issue_prefetch_p)
1238 issue_prefetch_ref (ref, unroll_factor, ahead);
1239 }
1240
1241 /* Returns true if REF is a memory write for which a nontemporal store insn
1242 can be used. */
1243
1244 static bool
1245 nontemporal_store_p (struct mem_ref *ref)
1246 {
1247 machine_mode mode;
1248 enum insn_code code;
1249
1250 /* REF must be a write that is not reused. We require it to be independent
1251 of all other memory references in the loop, as the nontemporal stores may
1252 be reordered with respect to other memory references. */
1253 if (!ref->write_p
1254 || !ref->independent_p
1255 || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1256 return false;
1257
1258 /* Check that we have the storent instruction for the mode. */
1259 mode = TYPE_MODE (TREE_TYPE (ref->mem));
1260 if (mode == BLKmode)
1261 return false;
1262
1263 code = optab_handler (storent_optab, mode);
1264 return code != CODE_FOR_nothing;
1265 }
1266
1267 /* If REF is a nontemporal store, we mark the corresponding modify statement
1268 and return true. Otherwise, we return false. */
1269
1270 static bool
1271 mark_nontemporal_store (struct mem_ref *ref)
1272 {
1273 if (!nontemporal_store_p (ref))
1274 return false;
1275
1276 if (dump_file && (dump_flags & TDF_DETAILS))
1277 fprintf (dump_file, "Marked reference %u:%u as a nontemporal store.\n",
1278 ref->group->uid, ref->uid);
1279
1280 gimple_assign_set_nontemporal_move (ref->stmt, true);
1281 ref->storent_p = true;
1282
1283 return true;
1284 }
1285
1286 /* Issue a memory fence instruction after LOOP. */
1287
1288 static void
1289 emit_mfence_after_loop (class loop *loop)
1290 {
1291 vec<edge> exits = get_loop_exit_edges (loop);
1292 edge exit;
1293 gcall *call;
1294 gimple_stmt_iterator bsi;
1295 unsigned i;
1296
1297 FOR_EACH_VEC_ELT (exits, i, exit)
1298 {
1299 call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1300
1301 if (!single_pred_p (exit->dest)
1302 /* If possible, we prefer not to insert the fence on other paths
1303 in cfg. */
1304 && !(exit->flags & EDGE_ABNORMAL))
1305 split_loop_exit_edge (exit);
1306 bsi = gsi_after_labels (exit->dest);
1307
1308 gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1309 }
1310
1311 exits.release ();
1312 update_ssa (TODO_update_ssa_only_virtuals);
1313 }
1314
1315 /* Returns true if we can use storent in loop, false otherwise. */
1316
1317 static bool
1318 may_use_storent_in_loop_p (class loop *loop)
1319 {
1320 bool ret = true;
1321
1322 if (loop->inner != NULL)
1323 return false;
1324
1325 /* If we must issue an mfence insn after using storent, check that there
1326 is a suitable place for it at each of the loop exits. */
1327 if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1328 {
1329 vec<edge> exits = get_loop_exit_edges (loop);
1330 unsigned i;
1331 edge exit;
1332
1333 FOR_EACH_VEC_ELT (exits, i, exit)
1334 if ((exit->flags & EDGE_ABNORMAL)
1335 && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1336 ret = false;
1337
1338 exits.release ();
1339 }
1340
1341 return ret;
1342 }
1343
1344 /* Marks nontemporal stores in LOOP. GROUPS contains the description of memory
1345 references in the loop. */
1346
1347 static void
1348 mark_nontemporal_stores (class loop *loop, struct mem_ref_group *groups)
1349 {
1350 struct mem_ref *ref;
1351 bool any = false;
1352
1353 if (!may_use_storent_in_loop_p (loop))
1354 return;
1355
1356 for (; groups; groups = groups->next)
1357 for (ref = groups->refs; ref; ref = ref->next)
1358 any |= mark_nontemporal_store (ref);
1359
1360 if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1361 emit_mfence_after_loop (loop);
1362 }
1363
1364 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1365 this is the case, fills in DESC with the description of the number of
1366 iterations. */
1367
1368 static bool
1369 should_unroll_loop_p (class loop *loop, class tree_niter_desc *desc,
1370 unsigned factor)
1371 {
1372 if (!can_unroll_loop_p (loop, factor, desc))
1373 return false;
1374
1375 /* We only consider loops without control flow for unrolling. This is not
1376 a hard restriction -- tree_unroll_loop works with arbitrary loops
1377 as well; but the unrolling/prefetching is usually more profitable for
1378 loops consisting of a single basic block, and we want to limit the
1379 code growth. */
1380 if (loop->num_nodes > 2)
1381 return false;
1382
1383 return true;
1384 }
1385
1386 /* Determine the coefficient by which to unroll LOOP, from the information
1387 contained in the list of memory references REFS. Description of
1388 number of iterations of LOOP is stored to DESC. NINSNS is the number of
1389 insns of the LOOP. EST_NITER is the estimated number of iterations of
1390 the loop, or -1 if no estimate is available. */
1391
1392 static unsigned
1393 determine_unroll_factor (class loop *loop, struct mem_ref_group *refs,
1394 unsigned ninsns, class tree_niter_desc *desc,
1395 HOST_WIDE_INT est_niter)
1396 {
1397 unsigned upper_bound;
1398 unsigned nfactor, factor, mod_constraint;
1399 struct mem_ref_group *agp;
1400 struct mem_ref *ref;
1401
1402 /* First check whether the loop is not too large to unroll. We ignore
1403 PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1404 from unrolling them enough to make exactly one cache line covered by each
1405 iteration. Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1406 us from unrolling the loops too many times in cases where we only expect
1407 gains from better scheduling and decreasing loop overhead, which is not
1408 the case here. */
1409 upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1410
1411 /* If we unrolled the loop more times than it iterates, the unrolled version
1412 of the loop would never be entered. */
1413 if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1414 upper_bound = est_niter;
1415
1416 if (upper_bound <= 1)
1417 return 1;
1418
1419 /* Choose the factor so that we may prefetch each cache line just once,
1420 but bound the unrolling by UPPER_BOUND. */
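  /* For example, if the references that will be prefetched have prefetch_mod
     2 and 8, the factor below grows from 1 to 2 and then to
     least_common_multiple (8, 2) = 8, provided UPPER_BOUND is at least 8.  */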
1421 factor = 1;
1422 for (agp = refs; agp; agp = agp->next)
1423 for (ref = agp->refs; ref; ref = ref->next)
1424 if (should_issue_prefetch_p (ref))
1425 {
1426 mod_constraint = ref->prefetch_mod;
1427 nfactor = least_common_multiple (mod_constraint, factor);
1428 if (nfactor <= upper_bound)
1429 factor = nfactor;
1430 }
1431
1432 if (!should_unroll_loop_p (loop, desc, factor))
1433 return 1;
1434
1435 return factor;
1436 }
1437
1438 /* Returns the total volume of the memory references REFS, taking into account
1439 reuses in the innermost loop and cache line size. TODO -- we should also
1440 take into account reuses across the iterations of the loops in the loop
1441 nest. */
1442
1443 static unsigned
1444 volume_of_references (struct mem_ref_group *refs)
1445 {
1446 unsigned volume = 0;
1447 struct mem_ref_group *gr;
1448 struct mem_ref *ref;
1449
1450 for (gr = refs; gr; gr = gr->next)
1451 for (ref = gr->refs; ref; ref = ref->next)
1452 {
1453 /* Almost always reuses another value? */
1454 if (ref->prefetch_before != PREFETCH_ALL)
1455 continue;
1456
1457 /* If several iterations access the same cache line, use the size of
1458 the line divided by this number. Otherwise, a cache line is
1459 accessed in each iteration. TODO -- in the latter case, we should
1460 take the size of the reference into account, rounding it up to a
1461 multiple of the cache line size. */
1462 volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1463 }
1464 return volume;
1465 }
1466
1467 /* Returns the volume of memory references accessed across VEC iterations of
1468 loops, whose sizes are described in the LOOP_SIZES array. N is the number
1469 of the loops in the nest (length of VEC and LOOP_SIZES vectors). */
1470
1471 static unsigned
1472 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1473 {
1474 unsigned i;
1475
1476 for (i = 0; i < n; i++)
1477 if (vec[i] != 0)
1478 break;
1479
1480 if (i == n)
1481 return 0;
1482
1483 gcc_assert (vec[i] > 0);
1484
1485 /* We ignore the parts of the distance vector in subloops, since usually
1486 the numbers of iterations are much smaller. */
1487 return loop_sizes[i] * vec[i];
1488 }
1489
1490 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1491 at the position corresponding to the loop of the step. N is the depth
1492 of the considered loop nest, and LOOP is its innermost loop. */
1493
1494 static void
1495 add_subscript_strides (tree access_fn, unsigned stride,
1496 HOST_WIDE_INT *strides, unsigned n, class loop *loop)
1497 {
1498 class loop *aloop;
1499 tree step;
1500 HOST_WIDE_INT astep;
1501 unsigned min_depth = loop_depth (loop) - n;
1502
1503 while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1504 {
1505 aloop = get_chrec_loop (access_fn);
1506 step = CHREC_RIGHT (access_fn);
1507 access_fn = CHREC_LEFT (access_fn);
1508
1509 if ((unsigned) loop_depth (aloop) <= min_depth)
1510 continue;
1511
1512 if (tree_fits_shwi_p (step))
1513 astep = tree_to_shwi (step);
1514 else
1515 astep = L1_CACHE_LINE_SIZE;
1516
1517 strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1518
1519 }
1520 }
1521
1522 /* Returns the volume of memory references accessed between two consecutive
1523 self-reuses of the reference DR. We consider the subscripts of DR in N
1524 loops, and LOOP_SIZES contains the volumes of accesses in each of the
1525 loops. LOOP is the innermost loop of the current loop nest. */
1526
1527 static unsigned
1528 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1529 class loop *loop)
1530 {
1531 tree stride, access_fn;
1532 HOST_WIDE_INT *strides, astride;
1533 vec<tree> access_fns;
1534 tree ref = DR_REF (dr);
1535 unsigned i, ret = ~0u;
1536
1537 /* In the following example:
1538
1539 for (i = 0; i < N; i++)
1540 for (j = 0; j < N; j++)
1541 use (a[j][i]);
1542 the same cache line is accessed each N steps (except if the change from
1543 i to i + 1 crosses the boundary of the cache line). Thus, for self-reuse,
1544 we cannot rely purely on the results of the data dependence analysis.
1545
1546 Instead, we compute the stride of the reference in each loop, and consider
1547 the innermost loop in which the stride is less than the cache size. */
1548
1549 strides = XCNEWVEC (HOST_WIDE_INT, n);
1550 access_fns = DR_ACCESS_FNS (dr);
1551
1552 FOR_EACH_VEC_ELT (access_fns, i, access_fn)
1553 {
1554 /* Keep track of the reference corresponding to the subscript, so that we
1555 know its stride. */
1556 while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1557 ref = TREE_OPERAND (ref, 0);
1558
1559 if (TREE_CODE (ref) == ARRAY_REF)
1560 {
1561 stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1562 if (tree_fits_uhwi_p (stride))
1563 astride = tree_to_uhwi (stride);
1564 else
1565 astride = L1_CACHE_LINE_SIZE;
1566
1567 ref = TREE_OPERAND (ref, 0);
1568 }
1569 else
1570 astride = 1;
1571
1572 add_subscript_strides (access_fn, astride, strides, n, loop);
1573 }
1574
1575 for (i = n; i-- > 0; )
1576 {
1577 unsigned HOST_WIDE_INT s;
1578
1579 s = strides[i] < 0 ? -strides[i] : strides[i];
1580
1581 if (s < (unsigned) L1_CACHE_LINE_SIZE
1582 && (loop_sizes[i]
1583 > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1584 {
1585 ret = loop_sizes[i];
1586 break;
1587 }
1588 }
1589
1590 free (strides);
1591 return ret;
1592 }
1593
1594 /* Determines the distance till the first reuse of each reference in REFS
1595 in the loop nest of LOOP. NO_OTHER_REFS is true if there are no other
1596 memory references in the loop. Return false if the analysis fails. */
1597
1598 static bool
1599 determine_loop_nest_reuse (class loop *loop, struct mem_ref_group *refs,
1600 bool no_other_refs)
1601 {
1602 class loop *nest, *aloop;
1603 vec<data_reference_p> datarefs = vNULL;
1604 vec<ddr_p> dependences = vNULL;
1605 struct mem_ref_group *gr;
1606 struct mem_ref *ref, *refb;
1607 auto_vec<loop_p> vloops;
1608 unsigned *loop_data_size;
1609 unsigned i, j, n;
1610 unsigned volume, dist, adist;
1611 HOST_WIDE_INT vol;
1612 data_reference_p dr;
1613 ddr_p dep;
1614
1615 if (loop->inner)
1616 return true;
1617
1618 /* Find the outermost loop of the loop nest of loop (we require that
1619 there are no sibling loops inside the nest). */
1620 nest = loop;
1621 while (1)
1622 {
1623 aloop = loop_outer (nest);
1624
1625 if (aloop == current_loops->tree_root
1626 || aloop->inner->next)
1627 break;
1628
1629 nest = aloop;
1630 }
1631
1632 /* For each loop, determine the amount of data accessed in each iteration.
1633 We use this to estimate whether the reference is evicted from the
1634 cache before its reuse. */
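  /* For example, if the references touch 64 bytes in one iteration of the
     innermost loop and that loop is expected to run 1000 times, then roughly
     64 * 1000 = 64000 bytes are accessed by one iteration of the enclosing
     loop; the numbers are illustrative only.  */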
1635 find_loop_nest (nest, &vloops);
1636 n = vloops.length ();
1637 loop_data_size = XNEWVEC (unsigned, n);
1638 volume = volume_of_references (refs);
1639 i = n;
1640 while (i-- != 0)
1641 {
1642 loop_data_size[i] = volume;
1643 /* Bound the volume by the L2 cache size, since above this bound,
1644 all dependence distances are equivalent. */
1645 if (volume > L2_CACHE_SIZE_BYTES)
1646 continue;
1647
1648 aloop = vloops[i];
1649 vol = estimated_stmt_executions_int (aloop);
1650 if (vol == -1)
1651 vol = expected_loop_iterations (aloop);
1652 volume *= vol;
1653 }
1654
1655 /* Prepare the references in the form suitable for data dependence
1656 analysis. We ignore unanalyzable data references (the results
1657 are used just as a heuristic to estimate the temporality of the
1658 references, hence we do not need to worry about correctness). */
1659 for (gr = refs; gr; gr = gr->next)
1660 for (ref = gr->refs; ref; ref = ref->next)
1661 {
1662 dr = create_data_ref (loop_preheader_edge (nest),
1663 loop_containing_stmt (ref->stmt),
1664 ref->mem, ref->stmt, !ref->write_p, false);
1665
1666 if (dr)
1667 {
1668 ref->reuse_distance = volume;
1669 dr->aux = ref;
1670 datarefs.safe_push (dr);
1671 }
1672 else
1673 no_other_refs = false;
1674 }
1675
1676 FOR_EACH_VEC_ELT (datarefs, i, dr)
1677 {
1678 dist = self_reuse_distance (dr, loop_data_size, n, loop);
1679 ref = (struct mem_ref *) dr->aux;
1680 if (ref->reuse_distance > dist)
1681 ref->reuse_distance = dist;
1682
1683 if (no_other_refs)
1684 ref->independent_p = true;
1685 }
1686
1687 if (!compute_all_dependences (datarefs, &dependences, vloops, true))
1688 return false;
1689
1690 FOR_EACH_VEC_ELT (dependences, i, dep)
1691 {
1692 if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1693 continue;
1694
1695 ref = (struct mem_ref *) DDR_A (dep)->aux;
1696 refb = (struct mem_ref *) DDR_B (dep)->aux;
1697
1698 if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1699 || DDR_COULD_BE_INDEPENDENT_P (dep)
1700 || DDR_NUM_DIST_VECTS (dep) == 0)
1701 {
1702 /* If the dependence cannot be analyzed, assume that there might be
1703 a reuse. */
1704 dist = 0;
1705
1706 ref->independent_p = false;
1707 refb->independent_p = false;
1708 }
1709 else
1710 {
1711 /* The distance vectors are normalized to be always lexicographically
1712 positive, hence we cannot tell just from them whether DDR_A comes
1713 before DDR_B or vice versa. However, it is not important,
1714 anyway -- if DDR_A is close to DDR_B, then it is either reused in
1715 DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1716 in cache (and marking it as nontemporal would not affect
1717 anything). */
1718
1719 dist = volume;
1720 for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1721 {
1722 adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1723 loop_data_size, n);
1724
1725 /* If this is a dependence in the innermost loop (i.e., the
1726 distances in all superloops are zero) and it is not
1727 the trivial self-dependence with distance zero, record that
1728 the references are not completely independent. */
1729 if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1730 && (ref != refb
1731 || DDR_DIST_VECT (dep, j)[n-1] != 0))
1732 {
1733 ref->independent_p = false;
1734 refb->independent_p = false;
1735 }
1736
1737 /* Ignore accesses closer than
1738 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1739 so that we use nontemporal prefetches e.g. if single memory
1740 location is accessed several times in a single iteration of
1741 the loop. */
1742 if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1743 continue;
1744
1745 if (adist < dist)
1746 dist = adist;
1747 }
1748 }
1749
1750 if (ref->reuse_distance > dist)
1751 ref->reuse_distance = dist;
1752 if (refb->reuse_distance > dist)
1753 refb->reuse_distance = dist;
1754 }
1755
1756 free_dependence_relations (dependences);
1757 free_data_refs (datarefs);
1758 free (loop_data_size);
1759
1760 if (dump_file && (dump_flags & TDF_DETAILS))
1761 {
1762 fprintf (dump_file, "Reuse distances:\n");
1763 for (gr = refs; gr; gr = gr->next)
1764 for (ref = gr->refs; ref; ref = ref->next)
1765 fprintf (dump_file, " reference %u:%u distance %u\n",
1766 ref->group->uid, ref->uid, ref->reuse_distance);
1767 }
1768
1769 return true;
1770 }
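 /* As a rough illustration (not an exact account of the computation
    above), consider a nest such as

      for (i = 0; i < n; i++)
	for (j = 0; j < m; j++)
	  a[j] += b[i * m + j];

    where a[j] is reused only on the next iteration of the outer loop.
    If one outer iteration touches more data than fits in the L2 cache,
    the reuse distance recorded for a[j] is of that order, and the
    reference is later treated as nontemporal (see
    mark_nontemporal_stores).  */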
1771
1772 /* Determine whether or not the trip count to ahead ratio is too small based
1773 on profitability considerations.
1774 AHEAD: the iteration ahead distance,
1775 EST_NITER: the estimated trip count. */
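 /* For example, assuming TRIP_COUNT_TO_AHEAD_RATIO is 4 (a value picked
    purely for illustration), AHEAD == 8 means that loops estimated to
    run fewer than 32 iterations are rejected: the prefetch distance
    would cover too large a fraction of the trip count to pay off.  */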
1776
1777 static bool
1778 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1779 {
1780 /* Assume the trip count to ahead ratio is big enough if the trip count could not
1781 be estimated at compile time. */
1782 if (est_niter < 0)
1783 return false;
1784
1785 if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1786 {
1787 if (dump_file && (dump_flags & TDF_DETAILS))
1788 fprintf (dump_file,
1789 "Not prefetching -- loop estimated to roll only %d times\n",
1790 (int) est_niter);
1791 return true;
1792 }
1793
1794 return false;
1795 }
1796
1797 /* Determine whether or not the number of memory references in the loop is
1798 reasonable based on the profitability and compilation-time considerations.
1799 NINSNS: estimated number of instructions in the loop,
1800 MEM_REF_COUNT: total number of memory references in the loop. */
1801
1802 static bool
1803 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1804 {
1805 int insn_to_mem_ratio;
1806
1807 if (mem_ref_count == 0)
1808 return false;
1809
1810 /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1811 (compute_all_dependences) have costs quadratic in the number of memory references.
1812 To avoid huge compilation time, we give up prefetching if mem_ref_count
1813 is too large. */
1814 if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1815 return false;
1816
1817 /* Prefetching improves performance by overlapping cache-missing
1818 memory accesses with CPU operations. If the loop does not have
1819 enough CPU operations to overlap with memory operations, prefetching
1820 won't give a significant benefit. One approximate way of checking
1821 this is to require the ratio of instructions to memory references to
1822 be above a certain limit. This approximation works well in practice.
1823 TODO: Implement a more precise computation by estimating the time
1824 for each CPU or memory op in the loop. Time estimates for memory ops
1825 should account for cache misses. */
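 /* A worked example with assumed numbers: NINSNS == 30 and
    MEM_REF_COUNT == 10 give a ratio of 3 below; whether that is
    sufficient depends on PREFETCH_MIN_INSN_TO_MEM_RATIO.  */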
1826 insn_to_mem_ratio = ninsns / mem_ref_count;
1827
1828 if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1829 {
1830 if (dump_file && (dump_flags & TDF_DETAILS))
1831 fprintf (dump_file,
1832 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1833 insn_to_mem_ratio);
1834 return false;
1835 }
1836
1837 return true;
1838 }
1839
1840 /* Determine whether or not the instruction to prefetch ratio in the loop is
1841 too small based on profitability considerations.
1842 NINSNS: estimated number of instructions in the loop,
1843 PREFETCH_COUNT: an estimate of the number of prefetches,
1844 UNROLL_FACTOR: the factor to unroll the loop if prefetching. */
1845
1846 static bool
1847 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1848 unsigned unroll_factor)
1849 {
1850 int insn_to_prefetch_ratio;
1851
1852 /* Prefetching most likely causes performance degradation when the instruction
1853 to prefetch ratio is too small. Too many prefetch instructions in a loop
1854 may reduce the I-cache performance.
1855 (unroll_factor * ninsns) is used to estimate the number of instructions in
1856 the unrolled loop. This implementation is a bit simplistic -- the number
1857 of issued prefetch instructions is also affected by unrolling. So,
1858 prefetch_mod and the unroll factor should be taken into account when
1859 determining prefetch_count. Also, the number of insns of the unrolled
1860 loop will usually be significantly smaller than the number of insns of the
1861 original loop * unroll_factor (at least the induction variable increases
1862 and the exit branches will get eliminated), so it might be better to use
1863 tree_estimate_loop_size + estimated_unrolled_size. */
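 /* A hypothetical example: UNROLL_FACTOR == 4, NINSNS == 12 and
    PREFETCH_COUNT == 6 give a ratio of (4 * 12) / 6 == 8 below; the
    loop is rejected only if that value falls below
    MIN_INSN_TO_PREFETCH_RATIO.  */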
1864 insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1865 if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1866 {
1867 if (dump_file && (dump_flags & TDF_DETAILS))
1868 fprintf (dump_file,
1869 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1870 insn_to_prefetch_ratio);
1871 return true;
1872 }
1873
1874 return false;
1875 }
1876
1877
1878 /* Issue prefetch instructions for array references in LOOP. Returns
1879 true if the LOOP was unrolled. */
1880
1881 static bool
1882 loop_prefetch_arrays (class loop *loop)
1883 {
1884 struct mem_ref_group *refs;
1885 unsigned ahead, ninsns, time, unroll_factor;
1886 HOST_WIDE_INT est_niter;
1887 class tree_niter_desc desc;
1888 bool unrolled = false, no_other_refs;
1889 unsigned prefetch_count;
1890 unsigned mem_ref_count;
1891
1892 if (optimize_loop_nest_for_size_p (loop))
1893 {
1894 if (dump_file && (dump_flags & TDF_DETAILS))
1895 fprintf (dump_file, " ignored (cold area)\n");
1896 return false;
1897 }
1898
1899 /* FIXME: the time should be weighted by the probabilities of the blocks in
1900 the loop body. */
1901 time = tree_num_loop_insns (loop, &eni_time_weights);
1902 if (time == 0)
1903 return false;
1904
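 /* AHEAD is the prefetch distance in iterations: the prefetch latency
    divided by the estimated time of one iteration, rounded up.  With a
    latency of 200 and TIME == 25 (numbers chosen only for illustration),
    data is prefetched 8 iterations before its use.  */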
1905 ahead = (PREFETCH_LATENCY + time - 1) / time;
1906 est_niter = estimated_stmt_executions_int (loop);
1907 if (est_niter == -1)
1908 est_niter = likely_max_stmt_executions_int (loop);
1909
1910 /* Prefetching is not likely to be profitable if the trip count to ahead
1911 ratio is too small. */
1912 if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1913 return false;
1914
1915 ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1916
1917 /* Step 1: gather the memory references. */
1918 refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1919
1920 /* Give up prefetching if the number of memory references in the
1921 loop is not reasonable based on profitability and compilation-time
1922 considerations. */
1923 if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1924 goto fail;
1925
1926 /* Step 2: estimate the reuse effects. */
1927 prune_by_reuse (refs);
1928
1929 if (nothing_to_prefetch_p (refs))
1930 goto fail;
1931
1932 if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
1933 goto fail;
1934
1935 /* Step 3: determine unroll factor. */
1936 unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1937 est_niter);
1938
1939 /* Estimate prefetch count for the unrolled loop. */
1940 prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1941 if (prefetch_count == 0)
1942 goto fail;
1943
1944 if (dump_file && (dump_flags & TDF_DETAILS))
1945 fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1946 HOST_WIDE_INT_PRINT_DEC "\n"
1947 "insn count %d, mem ref count %d, prefetch count %d\n",
1948 ahead, unroll_factor, est_niter,
1949 ninsns, mem_ref_count, prefetch_count);
1950
1951 /* Prefetching is not likely to be profitable if the instruction to prefetch
1952 ratio is too small. */
1953 if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1954 unroll_factor))
1955 goto fail;
1956
1957 mark_nontemporal_stores (loop, refs);
1958
1959 /* Step 4: what to prefetch? */
1960 if (!schedule_prefetches (refs, unroll_factor, ahead))
1961 goto fail;
1962
1963 /* Step 5: unroll the loop. TODO -- peel the first and last few
1964 iterations so that we do not issue superfluous prefetches. */
1965 if (unroll_factor != 1)
1966 {
1967 tree_unroll_loop (loop, unroll_factor,
1968 single_dom_exit (loop), &desc);
1969 unrolled = true;
1970 }
1971
1972 /* Step 6: issue the prefetches. */
1973 issue_prefetches (refs, unroll_factor, ahead);
1974
1975 fail:
1976 release_mem_refs (refs);
1977 return unrolled;
1978 }
1979
1980 /* Issue prefetch instructions for array references in loops. */
1981
1982 unsigned int
1983 tree_ssa_prefetch_arrays (void)
1984 {
1985 class loop *loop;
1986 bool unrolled = false;
1987 int todo_flags = 0;
1988
1989 if (!targetm.have_prefetch ()
1990 /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1991 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1992 of the processor costs and the i486 does not have prefetch, but
1993 -march=pentium4 causes targetm.have_prefetch to be true. Ugh. */
1994 || PREFETCH_BLOCK == 0)
1995 return 0;
1996
1997 if (dump_file && (dump_flags & TDF_DETAILS))
1998 {
1999 fprintf (dump_file, "Prefetching parameters:\n");
2000 fprintf (dump_file, " simultaneous prefetches: %d\n",
2001 SIMULTANEOUS_PREFETCHES);
2002 fprintf (dump_file, " prefetch latency: %d\n", PREFETCH_LATENCY);
2003 fprintf (dump_file, " prefetch block size: %d\n", PREFETCH_BLOCK);
2004 fprintf (dump_file, " L1 cache size: %d lines, %d kB\n",
2005 L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
2006 fprintf (dump_file, " L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
2007 fprintf (dump_file, " L2 cache size: %d kB\n", L2_CACHE_SIZE);
2008 fprintf (dump_file, " min insn-to-prefetch ratio: %d \n",
2009 MIN_INSN_TO_PREFETCH_RATIO);
2010 fprintf (dump_file, " min insn-to-mem ratio: %d \n",
2011 PREFETCH_MIN_INSN_TO_MEM_RATIO);
2012 fprintf (dump_file, "\n");
2013 }
2014
2015 initialize_original_copy_tables ();
2016
2017 if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
2018 {
2019 tree type = build_function_type_list (void_type_node,
2020 const_ptr_type_node, NULL_TREE);
2021 tree decl = add_builtin_function ("__builtin_prefetch", type,
2022 BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
2023 NULL, NULL_TREE);
2024 DECL_IS_NOVOPS (decl) = true;
2025 set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
2026 }
2027
2028 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2029 {
2030 if (dump_file && (dump_flags & TDF_DETAILS))
2031 fprintf (dump_file, "Processing loop %d:\n", loop->num);
2032
2033 unrolled |= loop_prefetch_arrays (loop);
2034
2035 if (dump_file && (dump_flags & TDF_DETAILS))
2036 fprintf (dump_file, "\n\n");
2037 }
2038
2039 if (unrolled)
2040 {
2041 scev_reset ();
2042 todo_flags |= TODO_cleanup_cfg;
2043 }
2044
2045 free_original_copy_tables ();
2046 return todo_flags;
2047 }
2048
2049 /* Prefetching. */
2050
2051 namespace {
2052
2053 const pass_data pass_data_loop_prefetch =
2054 {
2055 GIMPLE_PASS, /* type */
2056 "aprefetch", /* name */
2057 OPTGROUP_LOOP, /* optinfo_flags */
2058 TV_TREE_PREFETCH, /* tv_id */
2059 ( PROP_cfg | PROP_ssa ), /* properties_required */
2060 0, /* properties_provided */
2061 0, /* properties_destroyed */
2062 0, /* todo_flags_start */
2063 0, /* todo_flags_finish */
2064 };
2065
2066 class pass_loop_prefetch : public gimple_opt_pass
2067 {
2068 public:
2069 pass_loop_prefetch (gcc::context *ctxt)
2070 : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
2071 {}
2072
2073 /* opt_pass methods: */
2074 virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
2075 virtual unsigned int execute (function *);
2076
2077 }; // class pass_loop_prefetch
2078
2079 unsigned int
2080 pass_loop_prefetch::execute (function *fun)
2081 {
2082 if (number_of_loops (fun) <= 1)
2083 return 0;
2084
2085 if ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) != 0)
2086 {
2087 static bool warned = false;
2088
2089 if (!warned)
2090 {
2091 warning (OPT_Wdisabled_optimization,
2092 "%<l1-cache-size%> parameter is not a power of two %d",
2093 PREFETCH_BLOCK);
2094 warned = true;
2095 }
2096 return 0;
2097 }
2098
2099 return tree_ssa_prefetch_arrays ();
2100 }
2101
2102 } // anon namespace
2103
2104 gimple_opt_pass *
2105 make_pass_loop_prefetch (gcc::context *ctxt)
2106 {
2107 return new pass_loop_prefetch (ctxt);
2108 }
2109
2110