re PR rtl-optimization/33638 (wrong code with -O2 -fforce-addr)
[gcc.git] / gcc / dse.c
/* RTL dead store elimination.
   Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.

   Contributed by Richard Sandiford <rsandifor@codesourcery.com>
   and Kenneth Zadeck <zadeck@naturalbridge.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#undef BASELINE

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "df.h"
#include "cselib.h"
#include "timevar.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "alias.h"
#include "insn-config.h"
#include "expr.h"
#include "recog.h"
#include "dse.h"
#include "optabs.h"
#include "dbgcnt.h"

/* This file contains three techniques for performing Dead Store
   Elimination (dse).

   * The first technique performs dse locally on any base address.  It
   is based on cselib, which is a local value numbering technique.
   This technique is local to a basic block but deals with fairly
   general addresses.

   * The second technique performs dse globally but is restricted to
   base addresses that are either constant or are relative to the
   frame_pointer.

   * The third technique (which is only done after register allocation)
   processes the spill slots.  This differs from the second
   technique because it takes advantage of the fact that spilling is
   completely free from the effects of aliasing.

   Logically, dse is a backwards dataflow problem.  A store can be
   deleted if it cannot be reached in the backward direction by any
   use of the value being stored.  However, the local technique uses a
   forwards scan of the basic block because cselib requires that the
   block be processed in that order.
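
   For instance (an illustrative sketch by the editor, not from the
   sources), in

     a[0] = 1;
     a[0] = 2;
     x = a[0];

   the first store cannot be reached backwards from any read before it
   is overwritten, so the local technique can delete it.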

   The pass is logically broken into 7 steps:

   0) Initialization.

   1) The local algorithm, as well as scanning the insns for the two
   global algorithms.

   2) Analysis to see if the global algs are necessary.  In the case
   of stores based on a constant address, there must be at least two
   stores to that address, to make it possible to delete some of the
   stores.  In the case of stores off of the frame or spill related
   stores, only one store to an address is necessary because those
   stores die at the end of the function.

   3) Set up the global dataflow equations based on processing the
   info parsed in the first step.

   4) Solve the dataflow equations.

   5) Delete the insns that the global analysis has indicated are
   unnecessary.

   6) Cleanup.

   The first step uses cselib and canon_rtx to build the largest
   expression possible for each address.  That step is a forwards pass
   through each basic block.  From the point of view of the global
   technique, the first step could examine a block in either
   direction; the forwards ordering is to accommodate cselib.

   We make a simplifying assumption: addresses fall into four broad
   categories:

   1) base has rtx_varies_p == false, offset is constant.
   2) base has rtx_varies_p == false, offset is variable.
   3) base has rtx_varies_p == true, offset is constant.
   4) base has rtx_varies_p == true, offset is variable.

   The local passes are able to process all 4 kinds of addresses.  The
   global pass only handles (1).
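
   As an illustrative sketch (editor's example, with buf a file scope
   array, p a pointer parameter and i a variable index):

     1) buf[4] = x;    constant base, constant offset
     2) buf[i] = x;    constant base, variable offset
     3) p[4]   = x;    varying base, constant offset
     4) p[i]   = x;    varying base, variable offset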

   The global problem is formulated as follows:

     A store, S1, to address A, where A is not relative to the stack
     frame, can be eliminated if all paths from S1 to the end of the
     function contain another store to A before a read of A.

     If the address A is relative to the stack frame, a store S2 to A
     can be eliminated if there are no paths from S2 that reach the
     end of the function that read A before another store to A.  In
     this case S2 can also be deleted if there are paths from S2 to the
     end of the function that have no reads or writes of A.  This
     second case allows stores to the stack frame to be deleted that
     would otherwise die when the function returns.  This cannot be
     done if stores_off_frame_dead_at_return is false; see the doc for
     that variable for when that happens.

     The global problem is formulated as a backwards set union
     dataflow problem where the stores are the gens and reads are the
     kills.  Set union problems are rare and require some special
     handling given our representation of bitmaps.  A straightforward
     implementation requires a lot of bitmaps filled with 1s.
     These are expensive and cumbersome in our bitmap formulation so
     care has been taken to avoid large vectors filled with 1s.  See
     the comments in bb_info and in the dataflow confluence functions
     for details.


   There are two places for further enhancements to this algorithm:

   1) The original dse which was embedded in a pass called flow also
   did local address forwarding.  For example in

   A <- r100
   ... <- A

   flow would replace the right hand side of the second insn with a
   reference to r100.  Most of the information is available to add this
   to this pass.  It has not been done because it is a lot of work in
   the case that either r100 is assigned to between the first and
   second insn and/or the second insn is a load of part of the value
   stored by the first insn.

   insn 5 in gcc.c-torture/compile/990203-1.c simple case.
   insn 15 in gcc.c-torture/execute/20001017-2.c simple case.
   insn 25 in gcc.c-torture/execute/20001026-1.c simple case.
   insn 44 in gcc.c-torture/execute/20010910-1.c simple case.

   2) The cleaning up of spill code is quite profitable.  It currently
   depends on reading tea leaves and chicken entrails left by reload.
   This pass depends on reload creating a singleton alias set for each
   spill slot and telling the next dse pass which of these alias sets
   are the singletons.  Rather than analyze the addresses of the
   spills, dse's spill processing just does analysis of the loads and
   stores that use those alias sets.  There are three cases where this
   falls short:

   a) Reload sometimes creates the slot for one mode of access, and
   then inserts loads and/or stores for a smaller mode.  In this
   case, the current code just punts on the slot.  The proper thing
   to do is to back out and use one bit vector position for each
   byte of the entity associated with the slot.  This depends on
   KNOWING that reload always generates the accesses for each of the
   bytes in some canonical (read: easy to understand several
   passes after reload happens) way.

   b) Reload sometimes decides that the spill slot it allocated was not
   large enough for the mode and goes back and allocates more slots
   with the same mode and alias set.  The backout in this case is a
   little more graceful than (a).  In this case the slot is unmarked
   as being a spill slot and if the final address comes out to be based
   off the frame pointer, the global algorithm handles this slot.

   c) For any pass that may prespill, there is currently no
   mechanism to tell the dse pass that the slot being used has the
   special properties that reload uses.  It may be that all that is
   required is to have those passes make the same calls that reload
   does, assuming that the alias sets can be manipulated in the same
   way.  */

/* There are limits to the size of constant offsets we model for the
   global problem.  There are certainly test cases that exceed this
   limit; however, it is unlikely that there are important programs
   that really have constant offsets this size.  */
#define MAX_OFFSET (64 * 1024)


static bitmap scratch = NULL;
struct insn_info;

/* This structure holds information about a candidate store.  */
struct store_info
{

  /* False means this is a clobber.  */
  bool is_set;

  /* The id of the mem group of the base address.  If rtx_varies_p is
     true, this is -1.  Otherwise, it is the index into the group
     table.  */
  int group_id;

  /* This is the cselib value.  */
  cselib_val *cse_base;

  /* The canonized mem.  */
  rtx mem;

  /* The result of get_addr on mem.  */
  rtx mem_addr;

  /* If this is non-zero, it is the alias set of a spill location.  */
  alias_set_type alias_set;

  /* The offset of the first byte and the offset of the byte after
     the last byte associated with the operation.  */
  int begin, end;

  /* A bitmask as wide as the number of bytes in the word that
     contains a 1 if the byte may be needed.  The store is unused if
     all of the bits are 0.  */
  long positions_needed;

  /* The next store info for this insn.  */
  struct store_info *next;

  /* The right hand side of the store.  This is used if there is a
     subsequent reload of the mem's address somewhere later in the
     basic block.  */
  rtx rhs;
};
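
/* A worked illustration of the positions_needed encoding above
   (editor's sketch; the offsets are invented and this code is not
   part of the pass).  */
#if 0
static void
positions_needed_example (void)
{
  /* A 4 byte store starting at begin == 16 begins life with the low
     4 bits set, one per byte.  */
  long positions_needed = (1L << 4) - 1;	/* 0xf */
  int i;

  /* A later 2 byte store covering bytes 18 and 19 clears bits 2 and
     3, mirroring the loop in record_store.  */
  for (i = 18; i < 20; i++)
    positions_needed &= ~(1L << (i - 16));

  /* positions_needed is now 0x3: the original store still feeds
     bytes 16 and 17 and cannot be deleted yet.  */
}
#endif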

typedef struct store_info *store_info_t;
static alloc_pool cse_store_info_pool;
static alloc_pool rtx_store_info_pool;

/* This structure holds information about a load.  These are only
   built for rtx bases.  */
struct read_info
{
  /* The id of the mem group of the base address.  */
  int group_id;

  /* If this is non-zero, it is the alias set of a spill location.  */
  alias_set_type alias_set;

  /* The offset of the first byte and the offset of the byte after
     the last byte associated with the operation.  If begin == end ==
     0, the read did not have a constant offset.  */
  int begin, end;

  /* The mem being read.  */
  rtx mem;

  /* The next read_info for this insn.  */
  struct read_info *next;
};
typedef struct read_info *read_info_t;
static alloc_pool read_info_pool;


/* One of these records is created for each insn.  */

struct insn_info
{
  /* Set true if the insn contains a store but the insn itself cannot
     be deleted.  This is set if the insn is a parallel and there is
     more than one non-dead output or if the insn is in some way
     volatile.  */
  bool cannot_delete;

  /* This field is only used by the global algorithm.  It is set true
     if the insn contains any read of mem except for a type (1)
     address.  This is also set if the insn is a call or has a clobber
     mem.  If the insn contains a wild read, the use_rec will be
     null.  */
  bool wild_read;

  /* This field is only used for the processing of const functions.
     These functions cannot read memory, but they can read the stack
     because that is where they may get their parms.  It is set to
     true if the insn may contain a stack pointer based store.  */
  bool stack_pointer_based;

  /* This is true if any of the sets within the store contains a
     cselib base.  Such stores can only be deleted by the local
     algorithm.  */
  bool contains_cselib_groups;

  /* The insn.  */
  rtx insn;

  /* The list of mem sets or mem clobbers that are contained in this
     insn.  If the insn is deletable, it contains only one mem set.
     But it could also contain clobbers.  Insns that contain more than
     one mem set are not deletable, but each of those mems are here in
     order to provide info to delete other insns.  */
  store_info_t store_rec;

  /* The linked list of mem uses in this insn.  Only the reads from
     rtx bases are listed here.  The reads to cselib bases are
     completely processed during the first scan and so are never
     created.  */
  read_info_t read_rec;

  /* The prev insn in the basic block.  */
  struct insn_info * prev_insn;

  /* The linked list of insns that are in consideration for removal in
     the forwards pass thru the basic block.  This pointer may be
     trash as it is not cleared when a wild read occurs.  The only
     time it is guaranteed to be correct is when the traversal starts
     at active_local_stores.  */
  struct insn_info * next_local_store;
};

typedef struct insn_info *insn_info_t;
static alloc_pool insn_info_pool;

/* The linked list of stores that are under consideration in this
   basic block.  */
static insn_info_t active_local_stores;

struct bb_info
{

  /* Pointer to the insn info for the last insn in the block.  These
     are linked so this is how all of the insns are reached.  During
     scanning this is the current insn being scanned.  */
  insn_info_t last_insn;

  /* The info for the global dataflow problem.  */


  /* This is set if the transfer function should AND in the wild_read
     bitmap before applying the kill and gen sets.  That vector knocks
     out most of the bits in the bitmap and thus speeds up the
     operations.  */
  bool apply_wild_read;

  /* The set of store positions that exist in this block before a wild read.  */
  bitmap gen;

  /* The set of load positions that exist in this block above the
     same position of a store.  */
  bitmap kill;

  /* The set of stores that reach the top of the block without being
     killed by a read.

     Do not represent the in set if it is all ones.  Note that this is
     what the bitvector should logically be initialized to for a set
     intersection problem.  However, like the kill set, this is too
     expensive.  So initially, the in set will only be created for the
     exit block and any block that contains a wild read.  */
  bitmap in;

  /* The set of stores that reach the bottom of the block from its
     successors.

     Do not represent the out set if it is all ones.  Note that this is
     what the bitvector should logically be initialized to for a set
     intersection problem.  However, like the kill and in set, this is
     too expensive.  So what is done is that the confluence operator
     just initializes the vector from one of the out sets of the
     successors of the block.  */
  bitmap out;
};
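
/* Editor's sketch of the transfer function implied by the comments
   above, assuming the usual bitmap primitives; the real code lives in
   the dataflow step and also has to cope with in and out sets that
   are not represented because they are all ones.  Not part of the
   pass.  */
#if 0
static void
dse_transfer_function_sketch (struct bb_info *bb_info,
			      bitmap wild_read_mask)
{
  /* Knock out the positions killed by a wild read first...  */
  if (bb_info->apply_wild_read)
    bitmap_and_into (bb_info->out, wild_read_mask);

  /* ... then compute in = gen | (out & ~kill).  */
  bitmap_ior_and_compl (bb_info->in, bb_info->gen,
			bb_info->out, bb_info->kill);
}
#endif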

typedef struct bb_info *bb_info_t;
static alloc_pool bb_info_pool;

/* Table to hold all bb_infos.  */
static bb_info_t *bb_table;

/* There is a group_info for each rtx base that is used to reference
   memory.  There are not many rtx bases because they are very
   limited in scope.  */

struct group_info
{
  /* The actual base of the address.  */
  rtx rtx_base;

  /* The sequential id of the base.  This allows us to have a
     canonical ordering of these that is not based on addresses.  */
  int id;

  /* A mem wrapped around the base pointer for the group in order to
     do read dependency.  */
  rtx base_mem;

  /* Canonized version of base_mem, most likely the same thing.  */
  rtx canon_base_mem;

  /* These two sets of two bitmaps are used to keep track of how many
     stores are actually referencing that position from this base.  We
     only do this for rtx bases as this will be used to assign
     positions in the bitmaps for the global problem.  Bit N is set in
     store1 on the first store for offset N.  Bit N is set in store2
     for the second store to offset N.  This is all we need since we
     only care about offsets that have two or more stores for them.

     The "_n" suffix is for offsets less than 0 and the "_p" suffix is
     for 0 and greater offsets.

     There is one special case here, for stores into the stack frame:
     we OR store1 into store2 before deciding which stores to look at
     globally.  This is because stores to the stack frame that have
     no other reads before the end of the function can also be
     deleted.  */
  bitmap store1_n, store1_p, store2_n, store2_p;

  /* The positions in this bitmap have the same assignments as the in,
     out, gen and kill bitmaps.  This bitmap is all zeros except for
     the positions that are occupied by stores for this group.  */
  bitmap group_kill;

  /* True if there are any positions that are to be processed
     globally.  */
  bool process_globally;

  /* True if the base of this group is either the frame_pointer or
     hard_frame_pointer.  */
  bool frame_related;

  /* The offset_map is used to map the offsets from this base into
     positions in the global bitmaps.  It is only created after all of
     the stores have been scanned and we know which ones we care
     about.  */
  int *offset_map_n, *offset_map_p;
  int offset_map_size_n, offset_map_size_p;
};
typedef struct group_info *group_info_t;
typedef const struct group_info *const_group_info_t;
static alloc_pool rtx_group_info_pool;

/* Tables of group_info structures, hashed by base value.  */
static htab_t rtx_group_table;

/* Index into the rtx_group_vec.  */
static int rtx_group_next_id;

DEF_VEC_P(group_info_t);
DEF_VEC_ALLOC_P(group_info_t,heap);

static VEC(group_info_t,heap) *rtx_group_vec;


/* This structure holds the set of changes that are being deferred
   when removing a read operation.  See replace_read.  */
struct deferred_change
{

  /* The mem that is being replaced.  */
  rtx *loc;

  /* The reg it is being replaced with.  */
  rtx reg;

  struct deferred_change *next;
};

typedef struct deferred_change *deferred_change_t;
static alloc_pool deferred_change_pool;

static deferred_change_t deferred_change_list = NULL;

/* This is used to hold the alias sets of spill variables.  Since
   these are never aliased and there may be a lot of them, it makes
   sense to treat them specially.  This bitvector is only allocated in
   calls from dse_record_singleton_alias_set which currently is only
   made during reload1.  So when dse is called before reload this
   mechanism does nothing.  */

static bitmap clear_alias_sets = NULL;

/* The set of clear_alias_sets that have been disqualified because
   there are loads or stores using a different mode than the alias set
   was registered with.  */
static bitmap disqualified_clear_alias_sets = NULL;

/* The group that holds all of the clear_alias_sets.  */
static group_info_t clear_alias_group;

/* The modes of the clear_alias_sets.  */
static htab_t clear_alias_mode_table;

/* Hash table element to look up the mode for an alias set.  */
struct clear_alias_mode_holder
{
  alias_set_type alias_set;
  enum machine_mode mode;
};

static alloc_pool clear_alias_mode_pool;

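/* A hypothetical sketch of how reload feeds this mechanism (editor's
   illustration; dse_record_singleton_alias_set and
   dse_invalidate_singleton_alias_set are the entry points declared in
   dse.h, but the surrounding code here is invented).  */
#if 0
/* For each spill slot SLOT_MEM created with a fresh singleton alias
   set, reload would register the set and its mode...  */
dse_record_singleton_alias_set (MEM_ALIAS_SET (slot_mem),
				GET_MODE (slot_mem));
/* ... and withdraw the slot again if it is later reused in a
   conflicting way.  */
dse_invalidate_singleton_alias_set (MEM_ALIAS_SET (slot_mem));
#endif
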
/* This is true except for two cases:
   (1) current_function_stdarg -- i.e. we cannot do this
       for vararg functions because they play games with the frame.
   (2) In Ada, it is sometimes not safe to assume that any stores
       based off the stack frame go dead at the exit to a function.  */
static bool stores_off_frame_dead_at_return;

/* Counter for stats.  */
static int globally_deleted;
static int locally_deleted;
static int spill_deleted;

static bitmap all_blocks;

/* The number of bits used in the global bitmaps.  */
static unsigned int current_position;


static bool gate_dse (void);


\f
/*----------------------------------------------------------------------------
   Zeroth step.

   Initialization.
----------------------------------------------------------------------------*/

/* Hashtable callbacks for maintaining the clear_alias_mode_table,
   which maps each spill alias set to the mode it was registered
   with.  */

static int
clear_alias_mode_eq (const void *p1, const void *p2)
{
  const struct clear_alias_mode_holder * h1
    = (const struct clear_alias_mode_holder *) p1;
  const struct clear_alias_mode_holder * h2
    = (const struct clear_alias_mode_holder *) p2;
  return h1->alias_set == h2->alias_set;
}


static hashval_t
clear_alias_mode_hash (const void *p)
{
  const struct clear_alias_mode_holder *holder
    = (const struct clear_alias_mode_holder *) p;
  return holder->alias_set;
}


/* Find the entry associated with ALIAS_SET.  */

static struct clear_alias_mode_holder *
clear_alias_set_lookup (alias_set_type alias_set)
{
  struct clear_alias_mode_holder tmp_holder;
  void **slot;

  tmp_holder.alias_set = alias_set;
  slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);
  gcc_assert (*slot);

  return *slot;
}

/* Hashtable callbacks for maintaining the "bases" field of
   store_group_info, given that the addresses are function invariants.  */

static int
invariant_group_base_eq (const void *p1, const void *p2)
{
  const_group_info_t gi1 = (const_group_info_t) p1;
  const_group_info_t gi2 = (const_group_info_t) p2;
  return rtx_equal_p (gi1->rtx_base, gi2->rtx_base);
}


static hashval_t
invariant_group_base_hash (const void *p)
{
  const_group_info_t gi = (const_group_info_t) p;
  int do_not_record;
  return hash_rtx (gi->rtx_base, Pmode, &do_not_record, NULL, false);
}


/* Get the GROUP for BASE.  Add a new group if it is not there.  */

static group_info_t
get_group_info (rtx base)
{
  struct group_info tmp_gi;
  group_info_t gi;
  void **slot;

  if (base)
    {
      /* Find the store_base_info structure for BASE, creating a new one
	 if necessary.  */
      tmp_gi.rtx_base = base;
      slot = htab_find_slot (rtx_group_table, &tmp_gi, INSERT);
      gi = (group_info_t) *slot;
    }
  else
    {
      if (!clear_alias_group)
	{
	  clear_alias_group = gi = pool_alloc (rtx_group_info_pool);
	  memset (gi, 0, sizeof (struct group_info));
	  gi->id = rtx_group_next_id++;
	  gi->store1_n = BITMAP_ALLOC (NULL);
	  gi->store1_p = BITMAP_ALLOC (NULL);
	  gi->store2_n = BITMAP_ALLOC (NULL);
	  gi->store2_p = BITMAP_ALLOC (NULL);
	  gi->group_kill = BITMAP_ALLOC (NULL);
	  gi->process_globally = false;
	  gi->offset_map_size_n = 0;
	  gi->offset_map_size_p = 0;
	  gi->offset_map_n = NULL;
	  gi->offset_map_p = NULL;
	  VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
	}
      return clear_alias_group;
    }

  if (gi == NULL)
    {
      *slot = gi = pool_alloc (rtx_group_info_pool);
      gi->rtx_base = base;
      gi->id = rtx_group_next_id++;
      gi->base_mem = gen_rtx_MEM (QImode, base);
      gi->canon_base_mem = canon_rtx (gi->base_mem);
      gi->store1_n = BITMAP_ALLOC (NULL);
      gi->store1_p = BITMAP_ALLOC (NULL);
      gi->store2_n = BITMAP_ALLOC (NULL);
      gi->store2_p = BITMAP_ALLOC (NULL);
      gi->group_kill = BITMAP_ALLOC (NULL);
      gi->process_globally = false;
      gi->frame_related =
	(base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
      gi->offset_map_size_n = 0;
      gi->offset_map_size_p = 0;
      gi->offset_map_n = NULL;
      gi->offset_map_p = NULL;
      VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
    }

  return gi;
}


/* Initialization of data structures.  */

static void
dse_step0 (void)
{
  locally_deleted = 0;
  globally_deleted = 0;
  spill_deleted = 0;

  scratch = BITMAP_ALLOC (NULL);

  rtx_store_info_pool
    = create_alloc_pool ("rtx_store_info_pool",
			 sizeof (struct store_info), 100);
  read_info_pool
    = create_alloc_pool ("read_info_pool",
			 sizeof (struct read_info), 100);
  insn_info_pool
    = create_alloc_pool ("insn_info_pool",
			 sizeof (struct insn_info), 100);
  bb_info_pool
    = create_alloc_pool ("bb_info_pool",
			 sizeof (struct bb_info), 100);
  rtx_group_info_pool
    = create_alloc_pool ("rtx_group_info_pool",
			 sizeof (struct group_info), 100);
  deferred_change_pool
    = create_alloc_pool ("deferred_change_pool",
			 sizeof (struct deferred_change), 10);

  rtx_group_table = htab_create (11, invariant_group_base_hash,
				 invariant_group_base_eq, NULL);

  bb_table = XCNEWVEC (bb_info_t, last_basic_block);
  rtx_group_next_id = 0;

  stores_off_frame_dead_at_return =
    (!(TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE
       && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))
    && (!current_function_stdarg);

  init_alias_analysis ();

  if (clear_alias_sets)
    clear_alias_group = get_group_info (NULL);
  else
    clear_alias_group = NULL;
}


\f
/*----------------------------------------------------------------------------
   First step.

   Scan all of the insns.  Any random ordering of the blocks is fine.
   Each block is scanned in forward order to accommodate cselib which
   is used to remove stores with non-constant bases.
----------------------------------------------------------------------------*/

/* Delete all of the store_info recs from INSN_INFO.  */

static void
free_store_info (insn_info_t insn_info)
{
  store_info_t store_info = insn_info->store_rec;
  while (store_info)
    {
      store_info_t next = store_info->next;
      if (store_info->cse_base)
	pool_free (cse_store_info_pool, store_info);
      else
	pool_free (rtx_store_info_pool, store_info);
      store_info = next;
    }

  insn_info->cannot_delete = true;
  insn_info->contains_cselib_groups = false;
  insn_info->store_rec = NULL;
}


struct insn_size {
  int size;
  rtx insn;
};


/* Emit an insn to do the add embedded inside X, if X is a
   PRE/POST-INC/DEC/MODIFY.  D is a structure containing the insn and
   the size of the mode of the MEM that X is inside of.  */

static int
replace_inc_dec (rtx *r, void *d)
{
  rtx x = *r;
  struct insn_size *data = (struct insn_size *)d;
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	rtx r1 = XEXP (x, 0);
	/* Note that gen_int_mode takes the value first and the mode
	   second.  */
	rtx c = gen_int_mode (data->size, Pmode);
	add_insn_before (data->insn,
			 gen_rtx_SET (Pmode, r1,
				      gen_rtx_PLUS (Pmode, r1, c)),
			 NULL);
	return -1;
      }

    case PRE_DEC:
    case POST_DEC:
      {
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-data->size, Pmode);
	add_insn_before (data->insn,
			 gen_rtx_SET (Pmode, r1,
				      gen_rtx_PLUS (Pmode, r1, c)),
			 NULL);
	return -1;
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	/* We can reuse the add because we are about to delete the
	   insn that contained it.  The add is operand 1 of the
	   PRE/POST_MODIFY; operand 0 is the register itself.  */
	rtx add = XEXP (x, 1);
	rtx r1 = XEXP (x, 0);
	add_insn_before (data->insn,
			 gen_rtx_SET (Pmode, r1, add), NULL);
	return -1;
      }

    default:
      return 0;
    }
}



/* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
   and generate an add to replace that.  */

static int
replace_inc_dec_mem (rtx *r, void *d)
{
  rtx x = *r;
  if (GET_CODE (x) == MEM)
    {
      struct insn_size data;

      data.size = GET_MODE_SIZE (GET_MODE (x));
      data.insn = (rtx)d;

      for_each_rtx (&XEXP (x, 0), replace_inc_dec, &data);

      return -1;
    }
  return 0;
}

/* Before we delete INSN, make sure that the auto inc/dec, if it is
   there, is split into a separate insn.  */

static void
check_for_inc_dec (rtx insn)
{
  rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
  if (note)
    for_each_rtx (&insn, replace_inc_dec_mem, insn);
}
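
/* For illustration (editor's sketch, not from the sources): deleting

     (insn (set (mem (post_inc (reg R))) (reg V)))

   must not lose the increment of R, so the pass first emits

     (insn (set (reg R) (plus (reg R) (const_int SIZE))))

   before the dead store, where SIZE is the mode size of the MEM, and
   only then deletes the store itself.  */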


/* Delete the insn and free all of the fields inside INSN_INFO.  */

static void
delete_dead_store_insn (insn_info_t insn_info)
{
  read_info_t read_info;

  if (!dbg_cnt (dse))
    return;

  check_for_inc_dec (insn_info->insn);
  if (dump_file)
    {
      fprintf (dump_file, "Locally deleting insn %d ",
	       INSN_UID (insn_info->insn));
      if (insn_info->store_rec->alias_set)
	fprintf (dump_file, "alias set %d\n",
		 (int) insn_info->store_rec->alias_set);
      else
	fprintf (dump_file, "\n");
    }

  free_store_info (insn_info);
  read_info = insn_info->read_rec;

  while (read_info)
    {
      read_info_t next = read_info->next;
      pool_free (read_info_pool, read_info);
      read_info = next;
    }
  insn_info->read_rec = NULL;

  delete_insn (insn_info->insn);
  locally_deleted++;
  insn_info->insn = NULL;

  insn_info->wild_read = false;
}



/* Set the store* bitmaps and the offset_map_size* fields in GROUP
   based on OFFSET and WIDTH.  */

static void
set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
{
  HOST_WIDE_INT i;

  if ((offset > -MAX_OFFSET) && (offset < MAX_OFFSET))
    for (i = offset; i < offset + width; i++)
      {
	bitmap store1;
	bitmap store2;
	int ai;
	if (i < 0)
	  {
	    store1 = group->store1_n;
	    store2 = group->store2_n;
	    ai = -i;
	  }
	else
	  {
	    store1 = group->store1_p;
	    store2 = group->store2_p;
	    ai = i;
	  }

	if (bitmap_bit_p (store1, ai))
	  bitmap_set_bit (store2, ai);
	else
	  {
	    bitmap_set_bit (store1, ai);
	    if (i < 0)
	      {
		if (group->offset_map_size_n < ai)
		  group->offset_map_size_n = ai;
	      }
	    else
	      {
		if (group->offset_map_size_p < ai)
		  group->offset_map_size_p = ai;
	      }
	  }
      }
}
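
/* Hypothetical usage (editor's sketch; GROUP stands for a
   group_info_t from get_group_info, and the numbers are invented):

     set_usage_bits (group, 8, 4);   first store: bits 8..11 of store1_p
     set_usage_bits (group, 8, 4);   second store: bits 8..11 of store2_p

   Only positions that make it into store2_* earn bits in the global
   bitmaps, since a lone store to a non-frame address can never be
   proved dead by the global problem.  */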


/* Set the BB_INFO so that the last insn is marked as a wild read.  */

static void
add_wild_read (bb_info_t bb_info)
{
  insn_info_t insn_info = bb_info->last_insn;
  read_info_t *ptr = &insn_info->read_rec;

  while (*ptr)
    {
      read_info_t next = (*ptr)->next;
      if ((*ptr)->alias_set == 0)
	{
	  pool_free (read_info_pool, *ptr);
	  *ptr = next;
	}
      else
	ptr = &(*ptr)->next;
    }
  insn_info->wild_read = true;
  active_local_stores = NULL;
}



/* Return true if X is a constant or one of the registers that behave
   as a constant over the life of a function.  This is equivalent to
   !rtx_varies_p for memory addresses.  */

static bool
const_or_frame_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case MEM:
      return MEM_READONLY_P (x);

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
	 and arg pointers and not just the register number in case we have
	 eliminated the frame and/or arg pointer and are using it
	 for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
	  || x == pic_offset_table_rtx)
	return true;
      return false;

    default:
      return false;
    }
}

/* Take all reasonable action to put the address of MEM into the form
   that we can do analysis on.

   The gold standard is to get the address into the form: address +
   OFFSET where address is something that rtx_varies_p considers a
   constant.  When we can get the address in this form, we can do
   global analysis on it.  Note that for constant bases, address is
   not actually returned, only the group_id.  The address can be
   obtained from that.

   If that fails, we try cselib to get a value we can at least use
   locally.  If that fails we return false.

   The GROUP_ID is set to -1 for cselib bases and the index of the
   group for non_varying bases.  */

static bool
canon_address (rtx mem,
	       alias_set_type *alias_set_out,
	       int *group_id,
	       HOST_WIDE_INT *offset,
	       cselib_val **base)
{
  rtx mem_address = XEXP (mem, 0);
  rtx expanded_address, address;
  /* Make sure that cselib has initialized all of the operands of
     the address before asking it to do the subst.  */

  if (clear_alias_sets)
    {
      /* If this is a spill, do not do any further processing.  */
      alias_set_type alias_set = MEM_ALIAS_SET (mem);
      if (dump_file)
	fprintf (dump_file, "found alias set %d\n", (int) alias_set);
      if (bitmap_bit_p (clear_alias_sets, alias_set))
	{
	  struct clear_alias_mode_holder *entry
	    = clear_alias_set_lookup (alias_set);

	  /* If the modes do not match, we cannot process this set.  */
	  if (entry->mode != GET_MODE (mem))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "disqualifying alias set %d, (%s) != (%s)\n",
			 (int) alias_set, GET_MODE_NAME (entry->mode),
			 GET_MODE_NAME (GET_MODE (mem)));

	      bitmap_set_bit (disqualified_clear_alias_sets, alias_set);
	      return false;
	    }

	  *alias_set_out = alias_set;
	  *group_id = clear_alias_group->id;
	  return true;
	}
    }

  *alias_set_out = 0;

  cselib_lookup (mem_address, Pmode, 1);

  if (dump_file)
    {
      fprintf (dump_file, "  mem: ");
      print_inline_rtx (dump_file, mem_address, 0);
      fprintf (dump_file, "\n");
    }

  /* Use cselib to replace all of the reg references with the full
     expression.  This will take care of the case where we have

     r_x = base + offset;
     val = *r_x;

     by making it into

     val = *(base + offset);
  */

  expanded_address = cselib_expand_value_rtx (mem_address, scratch, 5);

  /* If this fails, just go with the mem_address.  */
  if (!expanded_address)
    expanded_address = mem_address;

  /* Split the address into canonical BASE + OFFSET terms.  */
  address = canon_rtx (expanded_address);

  *offset = 0;

  if (dump_file)
    {
      fprintf (dump_file, "\n   after cselib_expand address: ");
      print_inline_rtx (dump_file, expanded_address, 0);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "\n   after canon_rtx address: ");
      print_inline_rtx (dump_file, address, 0);
      fprintf (dump_file, "\n");
    }

  if (GET_CODE (address) == CONST)
    address = XEXP (address, 0);

  if (GET_CODE (address) == PLUS && GET_CODE (XEXP (address, 1)) == CONST_INT)
    {
      *offset = INTVAL (XEXP (address, 1));
      address = XEXP (address, 0);
    }

  if (const_or_frame_p (address))
    {
      group_info_t group = get_group_info (address);

      if (dump_file)
	fprintf (dump_file, "  gid=%d offset=%d \n", group->id, (int)*offset);
      *base = NULL;
      *group_id = group->id;
    }
  else
    {
      *base = cselib_lookup (address, Pmode, true);
      *group_id = -1;

      if (*base == NULL)
	{
	  if (dump_file)
	    fprintf (dump_file, " no cselib val - should be a wild read.\n");
	  return false;
	}
      if (dump_file)
	fprintf (dump_file, "  varying cselib base=%d offset = %d\n",
		 (*base)->value, (int)*offset);
    }
  return true;
}
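
/* For illustration (editor's sketch): on a typical target,

     (mem:SI (plus (reg/f frame_pointer) (const_int -4)))

   canonicalizes to the frame_pointer group with *offset == -4 and a
   non-negative *group_id, while

     (mem:SI (reg r100))

   has a varying base, so *group_id is set to -1 and *base to the
   cselib value for r100.  */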


/* Clear the rhs field from the active_local_stores array.  */

static void
clear_rhs_from_active_local_stores (void)
{
  insn_info_t ptr = active_local_stores;

  while (ptr)
    {
      store_info_t store_info = ptr->store_rec;
      /* Skip the clobbers.  */
      while (!store_info->is_set)
	store_info = store_info->next;

      store_info->rhs = NULL;

      ptr = ptr->next_local_store;
    }
}


/* BODY is an instruction pattern that belongs to INSN.  Return 1 if
   there is a candidate store, after adding it to the appropriate
   local store group if so.  */

static int
record_store (rtx body, bb_info_t bb_info)
{
  rtx mem;
  HOST_WIDE_INT offset = 0;
  HOST_WIDE_INT width = 0;
  alias_set_type spill_alias_set;
  insn_info_t insn_info = bb_info->last_insn;
  store_info_t store_info = NULL;
  int group_id;
  cselib_val *base = NULL;
  insn_info_t ptr, last;
  bool store_is_unused;

  if (GET_CODE (body) != SET && GET_CODE (body) != CLOBBER)
    return 0;

  /* If this is not used, then this cannot be used to keep the insn
     from being deleted.  On the other hand, it does provide something
     that can be used to prove that another store is dead.  */
  store_is_unused
    = (find_reg_note (insn_info->insn, REG_UNUSED, body) != NULL);

  /* Check whether that value is a suitable memory location.  */
  mem = SET_DEST (body);
  if (!MEM_P (mem))
    {
      /* If the set or clobber is unused, then it does not affect our
	 ability to get rid of the entire insn.  */
      if (!store_is_unused)
	insn_info->cannot_delete = true;
      return 0;
    }

  /* At this point we know mem is a mem.  */
  if (GET_MODE (mem) == BLKmode)
    {
      if (GET_CODE (XEXP (mem, 0)) == SCRATCH)
	{
	  if (dump_file)
	    fprintf (dump_file, " adding wild read for (clobber (mem:BLK (scratch))\n");
	  add_wild_read (bb_info);
	  insn_info->cannot_delete = true;
	}
      else if (!store_is_unused)
	{
	  /* If the set or clobber is unused, then it does not affect our
	     ability to get rid of the entire insn.  */
	  insn_info->cannot_delete = true;
	  clear_rhs_from_active_local_stores ();
	}
      return 0;
    }

  /* We can still process a volatile mem, we just cannot delete it.  */
  if (MEM_VOLATILE_P (mem))
    insn_info->cannot_delete = true;

  if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
    {
      clear_rhs_from_active_local_stores ();
      return 0;
    }

  width = GET_MODE_SIZE (GET_MODE (mem));

  if (spill_alias_set)
    {
      bitmap store1 = clear_alias_group->store1_p;
      bitmap store2 = clear_alias_group->store2_p;

      if (bitmap_bit_p (store1, spill_alias_set))
	bitmap_set_bit (store2, spill_alias_set);
      else
	bitmap_set_bit (store1, spill_alias_set);

      if (clear_alias_group->offset_map_size_p < spill_alias_set)
	clear_alias_group->offset_map_size_p = spill_alias_set;

      store_info = pool_alloc (rtx_store_info_pool);

      if (dump_file)
	fprintf (dump_file, " processing spill store %d(%s)\n",
		 (int) spill_alias_set, GET_MODE_NAME (GET_MODE (mem)));
    }
  else if (group_id >= 0)
    {
      /* In the restrictive case where the base is a constant or the
	 frame pointer we can do global analysis.  */

      group_info_t group
	= VEC_index (group_info_t, rtx_group_vec, group_id);

      store_info = pool_alloc (rtx_store_info_pool);
      set_usage_bits (group, offset, width);

      if (dump_file)
	fprintf (dump_file, " processing const base store gid=%d[%d..%d)\n",
		 group_id, (int)offset, (int)(offset+width));
    }
  else
    {
      rtx base_term = find_base_term (XEXP (mem, 0));
      if (!base_term
	  || (GET_CODE (base_term) == ADDRESS
	      && GET_MODE (base_term) == Pmode
	      && XEXP (base_term, 0) == stack_pointer_rtx))
	insn_info->stack_pointer_based = true;
      insn_info->contains_cselib_groups = true;

      store_info = pool_alloc (cse_store_info_pool);
      group_id = -1;

      if (dump_file)
	fprintf (dump_file, " processing cselib store [%d..%d)\n",
		 (int)offset, (int)(offset+width));
    }

  /* Check to see if this store causes some other stores to be
     dead.  */
  ptr = active_local_stores;
  last = NULL;

  while (ptr)
    {
      insn_info_t next = ptr->next_local_store;
      store_info_t s_info = ptr->store_rec;
      bool delete = true;

      /* Skip the clobbers.  We delete the active insn if this insn
	 shadows the set.  To have been put on the active list, it
	 has exactly one set.  */
      while (!s_info->is_set)
	s_info = s_info->next;

      if (s_info->alias_set != spill_alias_set)
	delete = false;
      else if (s_info->alias_set)
	{
	  struct clear_alias_mode_holder *entry
	    = clear_alias_set_lookup (s_info->alias_set);
	  /* Generally, spills cannot be processed if any of the
	     references to the slot have a different mode.  But if
	     we are in the same block and mode is exactly the same
	     between this store and one before in the same block,
	     we can still delete it.  */
	  if ((GET_MODE (mem) == GET_MODE (s_info->mem))
	      && (GET_MODE (mem) == entry->mode))
	    {
	      delete = true;
	      s_info->positions_needed = 0;
	    }
	  if (dump_file)
	    fprintf (dump_file, "    trying spill store in insn=%d alias_set=%d\n",
		     INSN_UID (ptr->insn), (int) s_info->alias_set);
	}
      else if ((s_info->group_id == group_id)
	       && (s_info->cse_base == base))
	{
	  HOST_WIDE_INT i;
	  if (dump_file)
	    fprintf (dump_file, "    trying store in insn=%d gid=%d[%d..%d)\n",
		     INSN_UID (ptr->insn), s_info->group_id,
		     (int)s_info->begin, (int)s_info->end);
	  for (i = offset; i < offset + width; i++)
	    if (i >= s_info->begin && i < s_info->end)
	      s_info->positions_needed &= ~(1L << (i - s_info->begin));
	}
      else if (s_info->rhs)
	/* Need to see if it is possible for this store to overwrite
	   the value of store_info.  If it is, set the rhs to NULL to
	   keep it from being used to remove a load.  */
	{
	  if (canon_true_dependence (s_info->mem,
				     GET_MODE (s_info->mem),
				     s_info->mem_addr,
				     mem, rtx_varies_p))
	    s_info->rhs = NULL;
	}

      /* An insn can be deleted if every position of every one of
	 its s_infos is zero.  */
      if (s_info->positions_needed != 0)
	delete = false;

      if (delete)
	{
	  insn_info_t insn_to_delete = ptr;

	  if (last)
	    last->next_local_store = ptr->next_local_store;
	  else
	    active_local_stores = ptr->next_local_store;

	  delete_dead_store_insn (insn_to_delete);
	}
      else
	last = ptr;

      ptr = next;
    }

  gcc_assert ((unsigned) width < sizeof (store_info->positions_needed) * CHAR_BIT);

  /* Finish filling in the store_info.  */
  store_info->next = insn_info->store_rec;
  insn_info->store_rec = store_info;
  store_info->mem = canon_rtx (mem);
  store_info->alias_set = spill_alias_set;
  store_info->mem_addr = get_addr (XEXP (mem, 0));
  store_info->cse_base = base;
  store_info->positions_needed = (1L << width) - 1;
  store_info->group_id = group_id;
  store_info->begin = offset;
  store_info->end = offset + width;
  store_info->is_set = GET_CODE (body) == SET;

  if (store_info->is_set
      /* No place to keep the value after ra.  */
      && !reload_completed
      /* The careful reviewer may wish to comment on my checking that
	 the rhs of a store is always a reg.  */
      && REG_P (SET_SRC (body))
      /* Sometimes the store and reload is used for truncation and
	 rounding.  */
      && !(FLOAT_MODE_P (GET_MODE (mem)) && (flag_float_store)))
    store_info->rhs = SET_SRC (body);
  else
    store_info->rhs = NULL;

  /* If this is a clobber, we return 0.  We will only be able to
     delete this insn if there is only one used store in it, but we
     can use the clobber to delete other stores earlier.  */
  return store_info->is_set ? 1 : 0;
}


static void
dump_insn_info (const char * start, insn_info_t insn_info)
{
  fprintf (dump_file, "%s insn=%d %s\n", start,
	   INSN_UID (insn_info->insn),
	   insn_info->store_rec ? "has store" : "naked");
}


/* If the modes are different and the value's source and target do not
   line up, we need to extract the value from lower part of the rhs of
   the store, shift it, and then put it into a form that can be shoved
   into the read_insn.  This function generates a right SHIFT of a
   value that is at least ACCESS_SIZE bytes wide and is read in
   READ_MODE.  The shift sequence is returned or NULL if we failed to
   find a shift.  */

static rtx
find_shift_sequence (rtx read_reg,
		     int access_size,
		     store_info_t store_info,
		     read_info_t read_info,
		     int shift)
{
  enum machine_mode store_mode = GET_MODE (store_info->mem);
  enum machine_mode read_mode = GET_MODE (read_info->mem);
  rtx chosen_seq = NULL;

  /* Some machines like the x86 have shift insns for each size of
     operand.  Other machines like the ppc or the ia-64 may only have
     shift insns that shift values within 32 or 64 bit registers.
     This loop tries to find the smallest shift insn that will right
     justify the value we want to read but is available in one insn on
     the machine.  */

  for (; access_size < UNITS_PER_WORD; access_size *= 2)
    {
      rtx target, new_reg, shift_seq, insn;
      enum machine_mode new_mode;
      int cost;

      /* Try a wider mode if truncating the store mode to ACCESS_SIZE
	 bytes requires a real instruction.  */
      if (access_size < GET_MODE_SIZE (store_mode)
	  && !TRULY_NOOP_TRUNCATION (access_size * BITS_PER_UNIT,
				     GET_MODE_BITSIZE (store_mode)))
	continue;

      new_mode = smallest_mode_for_size (access_size * BITS_PER_UNIT,
					 GET_MODE_CLASS (read_mode));
      new_reg = gen_reg_rtx (new_mode);

      start_sequence ();

      /* In theory we could also check for an ashr.  Ian Taylor knows
	 of one dsp where the cost of these two was not the same.  But
	 this really is a rare case anyway.  */
      target = expand_binop (new_mode, lshr_optab, new_reg,
			     GEN_INT (shift), new_reg, 1, OPTAB_DIRECT);

      shift_seq = get_insns ();
      end_sequence ();

      if (target != new_reg || shift_seq == NULL)
	continue;

      cost = 0;
      for (insn = shift_seq; insn != NULL_RTX; insn = NEXT_INSN (insn))
	if (INSN_P (insn))
	  cost += insn_rtx_cost (PATTERN (insn));

      /* The computation up to here is essentially independent
	 of the arguments and could be precomputed.  It may
	 not be worth doing so.  We could precompute if
	 worthwhile or at least cache the results.  The result
	 technically depends on SHIFT, ACCESS_SIZE, and
	 GET_MODE_CLASS (READ_MODE).  But in practice the
	 answer will depend only on ACCESS_SIZE.  */

      if (cost > COSTS_N_INSNS (1))
	continue;

      /* We found an acceptable shift.  Generate a move to
	 take the value from the store and put it into the
	 shift pseudo, then shift it, then generate another
	 move to put it into the target of the read.  */
      start_sequence ();
      emit_move_insn (new_reg, gen_lowpart (new_mode, store_info->rhs));
      emit_insn (shift_seq);
      convert_move (read_reg, new_reg, 1);

      if (dump_file)
	{
	  fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
		   REGNO (new_reg), GET_MODE_NAME (new_mode),
		   REGNO (store_info->rhs), GET_MODE_NAME (store_mode));

	  fprintf (dump_file, " -- with shift of r%d by %d\n",
		   REGNO(new_reg), shift);
	  fprintf (dump_file, " -- and second extract insn r%d:%s = r%d:%s\n",
		   REGNO (read_reg), GET_MODE_NAME (read_mode),
		   REGNO (new_reg), GET_MODE_NAME (new_mode));
	}

      /* Get the three insn sequence and return it.  */
      chosen_seq = get_insns ();
      end_sequence ();
      break;
    }

  return chosen_seq;
}
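
/* A source level illustration of when the shift is needed (editor's
   sketch, assuming a 32-bit little-endian target; compute is a
   hypothetical producer of the value, and none of this is part of
   the pass).  */
#if 0
extern int compute (void);

static short
shifted_read_example (void)
{
  union { int i; short s[2]; } u;
  u.i = compute ();	/* SImode store, rhs held in a reg.  */
  return u.s[1];	/* HImode read of the upper half: forwarded
			   as (rhs >> 16), which is the shift that
			   find_shift_sequence builds.  */
}
#endif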


/* Take a sequence of:
     A <- r1
     ...
     ... <- A

   and change it into
   r2 <- r1
   A <- r1
   ...
   ... <- r2

   or

   r3 <- extract (r1)
   r3 <- r3 >> shift
   r2 <- extract (r3)
   ... <- r2

   or

   r2 <- extract (r1)
   ... <- r2

   depending on the alignment and the mode of the store and
   subsequent load.


   The STORE_INFO and STORE_INSN are for the store and READ_INFO
   and READ_INSN are for the read.  Return true if the replacement
   went ok.  */

static bool
replace_read (store_info_t store_info, insn_info_t store_insn,
	      read_info_t read_info, insn_info_t read_insn, rtx *loc)
{
  enum machine_mode store_mode = GET_MODE (store_info->mem);
  enum machine_mode read_mode = GET_MODE (read_info->mem);
  int shift;
  int access_size; /* In bytes.  */
  rtx read_reg = gen_reg_rtx (read_mode);
  rtx shift_seq = NULL;

  if (!dbg_cnt (dse))
    return false;

  if (GET_MODE_CLASS (read_mode) != GET_MODE_CLASS (store_mode))
    return false;

  /* To get here the read is within the boundaries of the write so
     shift will never be negative.  Start out with the shift being in
     bytes.  */
  if (BYTES_BIG_ENDIAN)
    shift = store_info->end - read_info->end;
  else
    shift = read_info->begin - store_info->begin;

  access_size = shift + GET_MODE_SIZE (read_mode);

  /* From now on it is bits.  */
  shift *= BITS_PER_UNIT;

  /* We need to keep this in perspective.  We are replacing a read
     with a sequence of insns, but the read will almost certainly be
     in cache, so it is not going to be an expensive one.  Thus, we
     are not willing to do a multi insn shift or worse a subroutine
     call to get rid of the read.  */
  if (shift)
    {
      if (access_size > UNITS_PER_WORD || FLOAT_MODE_P (store_mode))
	return false;

      shift_seq = find_shift_sequence (read_reg, access_size, store_info,
				       read_info, shift);
      if (!shift_seq)
	return false;
    }

  if (dump_file)
    fprintf (dump_file, "replacing load at %d from store at %d\n",
	     INSN_UID (read_insn->insn), INSN_UID (store_insn->insn));

  if (validate_change (read_insn->insn, loc, read_reg, 0))
    {
      rtx insns;
      deferred_change_t deferred_change = pool_alloc (deferred_change_pool);

      if (read_mode == store_mode)
	{
	  start_sequence ();

	  /* The modes are the same and everything lines up.  Just
	     generate a simple move.  */
	  emit_move_insn (read_reg, store_info->rhs);
	  if (dump_file)
	    fprintf (dump_file, " -- adding move insn r%d = r%d\n",
		     REGNO (read_reg), REGNO (store_info->rhs));
	  insns = get_insns ();
	  end_sequence ();
	}
      else if (shift)
	insns = shift_seq;
      else
	{
	  /* The modes are different but the lsbs are in the same
	     place; we need to extract the value from the low part of
	     the rhs of the store.  */
	  start_sequence ();
	  convert_move (read_reg, store_info->rhs, 1);

	  if (dump_file)
	    fprintf (dump_file, " -- adding extract insn r%d:%s = r%d:%s\n",
		     REGNO (read_reg), GET_MODE_NAME (read_mode),
		     REGNO (store_info->rhs), GET_MODE_NAME (store_mode));
	  insns = get_insns ();
	  end_sequence ();
	}

      /* Insert this right before the store insn where it will be safe
	 from later insns that might change it before the read.  */
      emit_insn_before (insns, store_insn->insn);

      /* And now for the kludge part: cselib croaks if you just
	 return at this point.  There are two reasons for this:

	 1) Cselib has an idea of how many pseudos there are and
	 that does not include the new ones we just added.

	 2) Cselib does not know about the move insn we added
	 above the store_info, and there is no way to tell it
	 about it, because it has "moved on".

	 Problem (1) is fixable with a certain amount of engineering.
	 Problem (2) requires starting the bb from scratch.  This
	 could be expensive.

	 So we are just going to have to lie.  The move/extraction
	 insns are not really an issue, cselib did not see them.  But
	 the use of the new pseudo read_insn is a real problem because
	 cselib has not scanned this insn.  The way that we solve this
	 problem is that we are just going to put the mem back for now
	 and when we are finished with the block, we undo this.  We
	 keep a table of mems to get rid of.  At the end of the basic
	 block we can put them back.  */

      *loc = read_info->mem;
      deferred_change->next = deferred_change_list;
      deferred_change_list = deferred_change;
      deferred_change->loc = loc;
      deferred_change->reg = read_reg;

      /* Get rid of the read_info, from the point of view of the
	 rest of dse, play like this read never happened.  */
      read_insn->read_rec = read_info->next;
      pool_free (read_info_pool, read_info);
      return true;
    }
  else
    {
      if (dump_file)
	fprintf (dump_file, " -- validation failure\n");
      return false;
    }
}
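
/* A source level illustration of the whole transformation (editor's
   sketch, not part of the pass).  */
#if 0
static int
replace_read_example (int *p, int x)
{
  *p = x;	/* Store with a reg rhs; left in place for now.  */
  return *p;	/* Load rewritten to use a fresh pseudo copied
		   from x, so this insn no longer reads memory and
		   the store may later be proved dead.  */
}
#endif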

/* A for_each_rtx callback in which DATA is the bb_info.  Check to see
   if LOC is a mem and if it is look at the address and kill any
   appropriate stores that may be active.  */

static int
check_mem_read_rtx (rtx *loc, void *data)
{
  rtx mem = *loc;
  bb_info_t bb_info;
  insn_info_t insn_info;
  HOST_WIDE_INT offset = 0;
  HOST_WIDE_INT width = 0;
  alias_set_type spill_alias_set = 0;
  cselib_val *base = NULL;
  int group_id;
  read_info_t read_info;

  if (!mem || !MEM_P (mem))
    return 0;

  bb_info = (bb_info_t) data;
  insn_info = bb_info->last_insn;

  if ((MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
      || (MEM_VOLATILE_P (mem)))
    {
      if (dump_file)
	fprintf (dump_file, " adding wild read, volatile or barrier.\n");
      add_wild_read (bb_info);
      insn_info->cannot_delete = true;
      return 0;
    }

  /* If it is reading readonly mem, then there can be no conflict with
     another write.  */
  if (MEM_READONLY_P (mem))
    return 0;

  if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
    {
      if (dump_file)
	fprintf (dump_file, " adding wild read, canon_address failure.\n");
      add_wild_read (bb_info);
      return 0;
    }

  if (GET_MODE (mem) == BLKmode)
    width = -1;
  else
    width = GET_MODE_SIZE (GET_MODE (mem));

  read_info = pool_alloc (read_info_pool);
  read_info->group_id = group_id;
  read_info->mem = mem;
  read_info->alias_set = spill_alias_set;
  read_info->begin = offset;
  read_info->end = offset + width;
  read_info->next = insn_info->read_rec;
  insn_info->read_rec = read_info;

  /* We ignore the clobbers in store_info.  This is mildly aggressive,
     but there really should not be a clobber followed by a read.  */

1724 if (spill_alias_set)
1725 {
1726 insn_info_t i_ptr = active_local_stores;
1727 insn_info_t last = NULL;
1728
1729 if (dump_file)
1730 fprintf (dump_file, " processing spill load %d\n",
1731 (int) spill_alias_set);
1732
1733 while (i_ptr)
1734 {
1735 store_info_t store_info = i_ptr->store_rec;
1736
1737 /* Skip the clobbers. */
1738 while (!store_info->is_set)
1739 store_info = store_info->next;
1740
1741 if (store_info->alias_set == spill_alias_set)
1742 {
1743 if (dump_file)
1744 dump_insn_info ("removing from active", i_ptr);
1745
1746 if (last)
1747 last->next_local_store = i_ptr->next_local_store;
1748 else
1749 active_local_stores = i_ptr->next_local_store;
1750 }
1751 else
1752 last = i_ptr;
1753 i_ptr = i_ptr->next_local_store;
1754 }
1755 }
1756 else if (group_id >= 0)
1757 {
1758 /* This is the restricted case where the base is a constant or
1759 the frame pointer and offset is a constant. */
1760 insn_info_t i_ptr = active_local_stores;
1761 insn_info_t last = NULL;
1762
1763 if (dump_file)
1764 {
1765 if (width == -1)
1766 fprintf (dump_file, " processing const load gid=%d[BLK]\n",
1767 group_id);
1768 else
1769 fprintf (dump_file, " processing const load gid=%d[%d..%d)\n",
1770 group_id, (int)offset, (int)(offset+width));
1771 }
1772
1773 while (i_ptr)
1774 {
1775 bool remove = false;
1776 store_info_t store_info = i_ptr->store_rec;
1777
1778 /* Skip the clobbers. */
1779 while (!store_info->is_set)
1780 store_info = store_info->next;
1781
1782 /* There are three cases here. */
1783 if (store_info->group_id < 0)
1784 /* We have a cselib store followed by a read from a
1785 const base. */
1786 remove
1787 = canon_true_dependence (store_info->mem,
1788 GET_MODE (store_info->mem),
1789 store_info->mem_addr,
1790 mem, rtx_varies_p);
1791
1792 else if (group_id == store_info->group_id)
1793 {
1794 /* This is a block mode load. We may get lucky and
1795 canon_true_dependence may save the day. */
1796 if (width == -1)
1797 remove
1798 = canon_true_dependence (store_info->mem,
1799 GET_MODE (store_info->mem),
1800 store_info->mem_addr,
1801 mem, rtx_varies_p);
1802
1803 /* If this read is just reading back something that we just
1804 stored, rewrite the read. */
1805 else
1806 {
1807 if (store_info->rhs
1808 && (offset >= store_info->begin)
1809 && (offset + width <= store_info->end))
1810 {
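/* For example (illustrative values): a 2-byte read at offset 6
   from a store with begin == 4 yields mask == 0xc, i.e. bytes
   2 and 3 of the store.  */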
1811 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1812
1813 if ((store_info->positions_needed & mask) == mask
1814 && replace_read (store_info, i_ptr,
1815 read_info, insn_info, loc))
1816 return 0;
1817 }
1818 /* The bases are the same, just see if the offsets
1819 overlap. */
1820 if ((offset < store_info->end)
1821 && (offset + width > store_info->begin))
1822 remove = true;
1823 }
1824 }
1825
/* else
   The missing else case is that the bases are constant but
   different.  There is nothing to do here because such
   accesses cannot overlap.  */
1830
1831 if (remove)
1832 {
1833 if (dump_file)
1834 dump_insn_info ("removing from active", i_ptr);
1835
1836 if (last)
1837 last->next_local_store = i_ptr->next_local_store;
1838 else
1839 active_local_stores = i_ptr->next_local_store;
1840 }
1841 else
1842 last = i_ptr;
1843 i_ptr = i_ptr->next_local_store;
1844 }
1845 }
1846 else
1847 {
1848 insn_info_t i_ptr = active_local_stores;
1849 insn_info_t last = NULL;
1850 if (dump_file)
1851 {
1852 fprintf (dump_file, " processing cselib load mem:");
1853 print_inline_rtx (dump_file, mem, 0);
1854 fprintf (dump_file, "\n");
1855 }
1856
1857 while (i_ptr)
1858 {
1859 bool remove = false;
1860 store_info_t store_info = i_ptr->store_rec;
1861
1862 if (dump_file)
1863 fprintf (dump_file, " processing cselib load against insn %d\n",
1864 INSN_UID (i_ptr->insn));
1865
1866 /* Skip the clobbers. */
1867 while (!store_info->is_set)
1868 store_info = store_info->next;
1869
1870 /* If this read is just reading back something that we just
1871 stored, rewrite the read. */
1872 if (store_info->rhs
1873 && store_info->group_id == -1
1874 && store_info->cse_base == base
1875 && (offset >= store_info->begin)
1876 && (offset + width <= store_info->end))
1877 {
1878 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1879
1880 if ((store_info->positions_needed & mask) == mask
1881 && replace_read (store_info, i_ptr,
1882 read_info, insn_info, loc))
1883 return 0;
1884 }
1885
1886 if (!store_info->alias_set)
1887 remove = canon_true_dependence (store_info->mem,
1888 GET_MODE (store_info->mem),
1889 store_info->mem_addr,
1890 mem, rtx_varies_p);
1891
1892 if (remove)
1893 {
1894 if (dump_file)
1895 dump_insn_info ("removing from active", i_ptr);
1896
1897 if (last)
1898 last->next_local_store = i_ptr->next_local_store;
1899 else
1900 active_local_stores = i_ptr->next_local_store;
1901 }
1902 else
1903 last = i_ptr;
1904 i_ptr = i_ptr->next_local_store;
1905 }
1906 }
1907 return 0;
1908 }
1909
/* A note_uses callback in which DATA points to the BB_INFO for the
   block containing the insn.  Use for_each_rtx to apply
   check_mem_read_rtx, which records mem reads, to every rtx in *LOC.  */
1913
1914 static void
1915 check_mem_read_use (rtx *loc, void *data)
1916 {
1917 for_each_rtx (loc, check_mem_read_rtx, data);
1918 }
1919
/* Apply record_store to all candidate stores in INSN.  Mark INSN
   as not deletable if some part of it is not a candidate store or
   if it assigns to a non-register target.  */
1923
1924 static void
1925 scan_insn (bb_info_t bb_info, rtx insn)
1926 {
1927 rtx body;
1928 insn_info_t insn_info = pool_alloc (insn_info_pool);
1929 int mems_found = 0;
1930 memset (insn_info, 0, sizeof (struct insn_info));
1931
1932 if (dump_file)
1933 fprintf (dump_file, "\n**scanning insn=%d\n",
1934 INSN_UID (insn));
1935
1936 insn_info->prev_insn = bb_info->last_insn;
1937 insn_info->insn = insn;
1938 bb_info->last_insn = insn_info;
1939
1940
1941 /* Cselib clears the table for this case, so we have to essentially
1942 do the same. */
1943 if (NONJUMP_INSN_P (insn)
1944 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
1945 && MEM_VOLATILE_P (PATTERN (insn)))
1946 {
1947 add_wild_read (bb_info);
1948 insn_info->cannot_delete = true;
1949 return;
1950 }
1951
1952 /* Look at all of the uses in the insn. */
1953 note_uses (&PATTERN (insn), check_mem_read_use, bb_info);
1954
1955 if (CALL_P (insn))
1956 {
1957 insn_info->cannot_delete = true;
1958
/* Const functions cannot do anything bad, i.e. read arbitrary
   memory; however, they can read their parameters, which may
   have been pushed onto the stack.  */
1962 if (CONST_OR_PURE_CALL_P (insn) && !pure_call_p (insn))
1963 {
1964 insn_info_t i_ptr = active_local_stores;
1965 insn_info_t last = NULL;
1966
1967 if (dump_file)
1968 fprintf (dump_file, "const call %d\n", INSN_UID (insn));
1969
1970 while (i_ptr)
1971 {
1972 /* Remove the stack pointer based stores. */
1973 if (i_ptr->stack_pointer_based)
1974 {
1975 if (dump_file)
1976 dump_insn_info ("removing from active", i_ptr);
1977
1978 if (last)
1979 last->next_local_store = i_ptr->next_local_store;
1980 else
1981 active_local_stores = i_ptr->next_local_store;
1982 }
1983 else
1984 last = i_ptr;
1985 i_ptr = i_ptr->next_local_store;
1986 }
1987 }
1988
1989 else
1990 /* Every other call, including pure functions, may read memory. */
1991 add_wild_read (bb_info);
1992
1993 return;
1994 }
1995
1996 /* Assuming that there are sets in these insns, we cannot delete
1997 them. */
1998 if ((GET_CODE (PATTERN (insn)) == CLOBBER)
1999 || volatile_refs_p (PATTERN (insn))
2000 || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))
2001 || (RTX_FRAME_RELATED_P (insn))
2002 || find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX))
2003 insn_info->cannot_delete = true;
2004
2005 body = PATTERN (insn);
2006 if (GET_CODE (body) == PARALLEL)
2007 {
2008 int i;
2009 for (i = 0; i < XVECLEN (body, 0); i++)
2010 mems_found += record_store (XVECEXP (body, 0, i), bb_info);
2011 }
2012 else
2013 mems_found += record_store (body, bb_info);
2014
2015 if (dump_file)
2016 fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
2017 mems_found, insn_info->cannot_delete ? "true" : "false");
2018
/* If we found exactly one store to a mem, and the insn has not been
   marked cannot delete, add it into the active_local_stores so that
   it can be locally deleted if found dead.  Otherwise mark it as
   cannot delete.  This simplifies the processing later.  */
2023 if (mems_found == 1 && !insn_info->cannot_delete)
2024 {
2025 insn_info->next_local_store = active_local_stores;
2026 active_local_stores = insn_info;
2027 }
2028 else
2029 insn_info->cannot_delete = true;
2030 }
2031
2032
/* Remove from active_local_stores any insn whose store is based on
   the cselib value BASE.  This is a callback from cselib, invoked
   when it is about to discard that value.  */
2036
2037 static void
2038 remove_useless_values (cselib_val *base)
2039 {
2040 insn_info_t insn_info = active_local_stores;
2041 insn_info_t last = NULL;
2042
2043 while (insn_info)
2044 {
2045 store_info_t store_info = insn_info->store_rec;
2046 bool delete = false;
2047
/* If ANY of the store_infos match the cselib group that is
   being deleted, then the insn cannot be deleted.  */
2050 while (store_info)
2051 {
2052 if ((store_info->group_id == -1)
2053 && (store_info->cse_base == base))
2054 {
2055 delete = true;
2056 break;
2057 }
2058 store_info = store_info->next;
2059 }
2060
2061 if (delete)
2062 {
2063 if (last)
2064 last->next_local_store = insn_info->next_local_store;
2065 else
2066 active_local_stores = insn_info->next_local_store;
2067 free_store_info (insn_info);
2068 }
2069 else
2070 last = insn_info;
2071
2072 insn_info = insn_info->next_local_store;
2073 }
2074 }
2075
2076
2077 /* Do all of step 1. */
2078
2079 static void
2080 dse_step1 (void)
2081 {
2082 basic_block bb;
2083
2084 cselib_init (false);
2085 all_blocks = BITMAP_ALLOC (NULL);
2086 bitmap_set_bit (all_blocks, ENTRY_BLOCK);
2087 bitmap_set_bit (all_blocks, EXIT_BLOCK);
2088
2089 FOR_ALL_BB (bb)
2090 {
2091 insn_info_t ptr;
2092 bb_info_t bb_info = pool_alloc (bb_info_pool);
2093
2094 memset (bb_info, 0, sizeof (struct bb_info));
2095 bitmap_set_bit (all_blocks, bb->index);
2096
2097 bb_table[bb->index] = bb_info;
2098 cselib_discard_hook = remove_useless_values;
2099
2100 if (bb->index >= NUM_FIXED_BLOCKS)
2101 {
2102 rtx insn;
2103
2104 cse_store_info_pool
2105 = create_alloc_pool ("cse_store_info_pool",
2106 sizeof (struct store_info), 100);
2107 active_local_stores = NULL;
2108 cselib_clear_table ();
2109
2110 /* Scan the insns. */
2111 FOR_BB_INSNS (bb, insn)
2112 {
2113 if (INSN_P (insn))
2114 scan_insn (bb_info, insn);
2115 cselib_process_insn (insn);
2116 }
2117
/* This is something of a hack, because the global algorithm
   is supposed to take care of the case where stores go dead
   at the end of the function.  However, the global
   algorithm must take a more conservative view of block
   mode reads than the local algorithm does.  So to handle
   the case where you have a store to the frame followed by
   a non-overlapping block mode read, we look at the active
   local stores at the end of the function and delete all of
   the frame and spill based ones.  */
2127 if (stores_off_frame_dead_at_return
2128 && (EDGE_COUNT (bb->succs) == 0
2129 || (single_succ_p (bb)
2130 && single_succ (bb) == EXIT_BLOCK_PTR
2131 && ! current_function_calls_eh_return)))
2132 {
2133 insn_info_t i_ptr = active_local_stores;
2134 while (i_ptr)
2135 {
2136 store_info_t store_info = i_ptr->store_rec;
2137
2138 /* Skip the clobbers. */
2139 while (!store_info->is_set)
2140 store_info = store_info->next;
2141 if (store_info->alias_set)
2142 delete_dead_store_insn (i_ptr);
2143 else
2144 if (store_info->group_id >= 0)
2145 {
2146 group_info_t group
2147 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2148 if (group->frame_related)
2149 delete_dead_store_insn (i_ptr);
2150 }
2151
2152 i_ptr = i_ptr->next_local_store;
2153 }
2154 }
2155
2156 /* Get rid of the loads that were discovered in
2157 replace_read. Cselib is finished with this block. */
2158 while (deferred_change_list)
2159 {
2160 deferred_change_t next = deferred_change_list->next;
2161
2162 /* There is no reason to validate this change. That was
2163 done earlier. */
2164 *deferred_change_list->loc = deferred_change_list->reg;
2165 pool_free (deferred_change_pool, deferred_change_list);
2166 deferred_change_list = next;
2167 }
2168
2169 /* Get rid of all of the cselib based store_infos in this
2170 block and mark the containing insns as not being
2171 deletable. */
2172 ptr = bb_info->last_insn;
2173 while (ptr)
2174 {
2175 if (ptr->contains_cselib_groups)
2176 free_store_info (ptr);
2177 ptr = ptr->prev_insn;
2178 }
2179
2180 free_alloc_pool (cse_store_info_pool);
2181 }
2182 }
2183
2184 cselib_finish ();
2185 htab_empty (rtx_group_table);
2186 }
2187
2188 \f
2189 /*----------------------------------------------------------------------------
2190 Second step.
2191
2192 Assign each byte position in the stores that we are going to
2193 analyze globally to a position in the bitmaps. Returns true if
2194 there are any bit positions assigned.
2195 ----------------------------------------------------------------------------*/
2196
2197 static void
2198 dse_step2_init (void)
2199 {
2200 unsigned int i;
2201 group_info_t group;
2202
2203 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2204 {
/* For all non-stack-related bases, we only consider a store to
   be deletable if there are two or more stores for that
   position.  This is because it takes one store to make the
   other store redundant.  However, for the stores that are
   stack related, we consider them if there is only one store
   for the position.  We do this because the stack related
   stores can be deleted if there is no read between them and
   the end of the function.

   To make this work in the current framework, we take the stack
   related bases and add all of the bits from store1 into store2.
   This has the effect of making them eligible even if there is
   only one store.  */
2218
2219 if (stores_off_frame_dead_at_return && group->frame_related)
2220 {
2221 bitmap_ior_into (group->store2_n, group->store1_n);
2222 bitmap_ior_into (group->store2_p, group->store1_p);
2223 if (dump_file)
2224 fprintf (dump_file, "group %d is frame related ", i);
2225 }
2226
2227 group->offset_map_size_n++;
2228 group->offset_map_n = XNEWVEC (int, group->offset_map_size_n);
2229 group->offset_map_size_p++;
2230 group->offset_map_p = XNEWVEC (int, group->offset_map_size_p);
2231 group->process_globally = false;
2232 if (dump_file)
2233 {
2234 fprintf (dump_file, "group %d(%d+%d): ", i,
2235 (int)bitmap_count_bits (group->store2_n),
2236 (int)bitmap_count_bits (group->store2_p));
2237 bitmap_print (dump_file, group->store2_n, "n ", " ");
2238 bitmap_print (dump_file, group->store2_p, "p ", "\n");
2239 }
2240 }
2241 }
2242
2243
2244 /* Init the offset tables for the normal case. */
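/* As an illustration (made-up numbers): if the first group has bits
   4 and 8 set in store2_p and nothing set in store2_n, then
   offset_map_p[4] is assigned position 1 and offset_map_p[8]
   position 2, and bits 1 and 2 are entered into the group's
   group_kill set.  */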
2245
2246 static bool
2247 dse_step2_nospill (void)
2248 {
2249 unsigned int i;
2250 group_info_t group;
/* Position 0 is reserved: a 0 in the offset maps means that no
   bit position has been assigned.  */
2253 current_position = 1;
2254
2255 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2256 {
2257 bitmap_iterator bi;
2258 unsigned int j;
2259
2260 if (group == clear_alias_group)
2261 continue;
2262
2263 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2264 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2265 bitmap_clear (group->group_kill);
2266
2267 EXECUTE_IF_SET_IN_BITMAP (group->store2_n, 0, j, bi)
2268 {
2269 bitmap_set_bit (group->group_kill, current_position);
2270 group->offset_map_n[j] = current_position++;
2271 group->process_globally = true;
2272 }
2273 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2274 {
2275 bitmap_set_bit (group->group_kill, current_position);
2276 group->offset_map_p[j] = current_position++;
2277 group->process_globally = true;
2278 }
2279 }
2280 return current_position != 1;
2281 }
2282
2283
2284 /* Init the offset tables for the spill case. */
2285
2286 static bool
2287 dse_step2_spill (void)
2288 {
2289 unsigned int j;
2290 group_info_t group = clear_alias_group;
2291 bitmap_iterator bi;
2292
/* Position 0 is reserved: a 0 in the offset maps means that no
   bit position has been assigned.  */
2295 current_position = 1;
2296
2297 if (dump_file)
2298 {
2299 bitmap_print (dump_file, clear_alias_sets,
2300 "clear alias sets ", "\n");
2301 bitmap_print (dump_file, disqualified_clear_alias_sets,
2302 "disqualified clear alias sets ", "\n");
2303 }
2304
2305 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2306 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2307 bitmap_clear (group->group_kill);
2308
2309 /* Remove the disqualified positions from the store2_p set. */
2310 bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);
2311
2312 /* We do not need to process the store2_n set because
2313 alias_sets are always positive. */
2314 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2315 {
2316 bitmap_set_bit (group->group_kill, current_position);
2317 group->offset_map_p[j] = current_position++;
2318 group->process_globally = true;
2319 }
2320
2321 return current_position != 1;
2322 }
2323
2324
2325 \f
2326 /*----------------------------------------------------------------------------
2327 Third step.
2328
2329 Build the bit vectors for the transfer functions.
2330 ----------------------------------------------------------------------------*/
2331
2332
/* Note that this is NOT a general purpose function.  Any mem that has
   an alias set registered here is expected to be COMPLETELY unaliased:
   i.e. its addresses are not and need not be examined.
2336
2337 It is known that all references to this address will have this
2338 alias set and there are NO other references to this address in the
2339 function.
2340
2341 Currently the only place that is known to be clean enough to use
2342 this interface is the code that assigns the spill locations.
2343
All of the mems that have alias_sets registered are subjected to a
   very powerful form of dse where function calls, volatile reads and
   writes, and reads from random locations are not taken into account.
2347
It is also assumed that these locations go dead when the function
   returns.  This assumption could be relaxed if places were found
   where it does not hold.
2351
The MODE is passed in and saved.  The mode of each load or store to
   a mem with ALIAS_SET is checked against MODE.  If the size of that
   load or store is different from MODE, processing is halted on this
   alias set.  For the vast majority of alias sets, all of the loads
2356 and stores will use the same mode. But vectors are treated
2357 differently: the alias set is established for the entire vector,
2358 but reload will insert loads and stores for individual elements and
2359 we do not necessarily have the information to track those separate
2360 elements. So when we see a mode mismatch, we just bail. */
2361
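/* A minimal usage sketch (hypothetical caller; per the comment above,
   the only real user is the code that assigns spill locations):

     alias_set_type set = new_alias_set ();
     set_mem_alias_set (spill_mem, set);
     dse_record_singleton_alias_set (set, GET_MODE (spill_mem));

   Every subsequent load or store of the slot must then be emitted
   with this same alias set.  */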
2362
2363 void
2364 dse_record_singleton_alias_set (alias_set_type alias_set,
2365 enum machine_mode mode)
2366 {
2367 struct clear_alias_mode_holder tmp_holder;
2368 struct clear_alias_mode_holder *entry;
2369 void **slot;
2370
2371 /* If we are not going to run dse, we need to return now or there
2372 will be problems with allocating the bitmaps. */
2373 if ((!gate_dse()) || !alias_set)
2374 return;
2375
2376 if (!clear_alias_sets)
2377 {
2378 clear_alias_sets = BITMAP_ALLOC (NULL);
2379 disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
2380 clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
2381 clear_alias_mode_eq, NULL);
2382 clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
2383 sizeof (struct clear_alias_mode_holder), 100);
2384 }
2385
2386 bitmap_set_bit (clear_alias_sets, alias_set);
2387
2388 tmp_holder.alias_set = alias_set;
2389
2390 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, INSERT);
2391 gcc_assert (*slot == NULL);
2392
2393 *slot = entry = pool_alloc (clear_alias_mode_pool);
2394 entry->alias_set = alias_set;
2395 entry->mode = mode;
2396 }
2397
2398
2399 /* Remove ALIAS_SET from the sets of stack slots being considered. */
2400
2401 void
2402 dse_invalidate_singleton_alias_set (alias_set_type alias_set)
2403 {
2404 if ((!gate_dse()) || !alias_set)
2405 return;
2406
2407 bitmap_clear_bit (clear_alias_sets, alias_set);
2408 }
2409
2410
2411 /* Look up the bitmap index for OFFSET in GROUP_INFO. If it is not
2412 there, return 0. */
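/* For instance (made-up map): with offset_map_n == {0, 5, 7} and
   offset_map_size_n == 3, an OFFSET of -2 yields offset_map_n[2],
   i.e. bitmap index 7, while any OFFSET <= -3 falls outside the map
   and yields 0.  */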
2413
2414 static int
2415 get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
2416 {
2417 if (offset < 0)
2418 {
2419 HOST_WIDE_INT offset_p = -offset;
2420 if (offset_p >= group_info->offset_map_size_n)
2421 return 0;
2422 return group_info->offset_map_n[offset_p];
2423 }
2424 else
2425 {
2426 if (offset >= group_info->offset_map_size_p)
2427 return 0;
2428 return group_info->offset_map_p[offset];
2429 }
2430 }
2431
2432
/* Process the STORE_INFOs into the bitmaps GEN and KILL.  KILL
   may be NULL.  */
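/* E.g. a store with begin == 4 and end == 8 sets the gen bit for each
   of byte positions 4..7 of its group that has a bitmap index
   assigned, and clears those bits in kill.  */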
2435
2436 static void
2437 scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
2438 {
2439 while (store_info)
2440 {
2441 HOST_WIDE_INT i;
2442 group_info_t group_info
2443 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2444 if (group_info->process_globally)
2445 for (i = store_info->begin; i < store_info->end; i++)
2446 {
2447 int index = get_bitmap_index (group_info, i);
2448 if (index != 0)
2449 {
2450 bitmap_set_bit (gen, index);
2451 if (kill)
2452 bitmap_clear_bit (kill, index);
2453 }
2454 }
2455 store_info = store_info->next;
2456 }
2457 }
2458
2459
/* Process the STORE_INFOs into the bitmaps GEN and KILL.  KILL
   may be NULL.  */
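/* Unlike the nospill case, the bitmap position is keyed off the
   store's singleton alias set rather than off byte offsets.  */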
2462
2463 static void
2464 scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
2465 {
2466 while (store_info)
2467 {
2468 if (store_info->alias_set)
2469 {
2470 int index = get_bitmap_index (clear_alias_group,
2471 store_info->alias_set);
2472 if (index != 0)
2473 {
2474 bitmap_set_bit (gen, index);
2475 if (kill)
2476 bitmap_clear_bit (kill, index);
2477 }
2478 }
2479 store_info = store_info->next;
2480 }
2481 }
2482
2483
/* Process the READ_INFOs into the bitmaps GEN and KILL.  KILL
   may be NULL.  */
2486
2487 static void
2488 scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
2489 {
2490 read_info_t read_info = insn_info->read_rec;
2491 int i;
2492 group_info_t group;
2493
2494 while (read_info)
2495 {
2496 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2497 {
2498 if (group->process_globally)
2499 {
2500 if (i == read_info->group_id)
2501 {
2502 if (read_info->begin > read_info->end)
2503 {
2504 /* Begin > end for block mode reads. */
2505 if (kill)
2506 bitmap_ior_into (kill, group->group_kill);
2507 bitmap_and_compl_into (gen, group->group_kill);
2508 }
2509 else
2510 {
2511 /* The groups are the same, just process the
2512 offsets. */
2513 HOST_WIDE_INT j;
2514 for (j = read_info->begin; j < read_info->end; j++)
2515 {
2516 int index = get_bitmap_index (group, j);
2517 if (index != 0)
2518 {
2519 if (kill)
2520 bitmap_set_bit (kill, index);
2521 bitmap_clear_bit (gen, index);
2522 }
2523 }
2524 }
2525 }
2526 else
2527 {
/* The groups are different; if the alias sets
   conflict, clear the entire group.  We only need
   to apply this test if the read_info is a cselib
   read.  Anything with a constant base cannot alias
   something else with a different constant
   base.  */
2534 if ((read_info->group_id < 0)
2535 && canon_true_dependence (group->base_mem,
2536 QImode,
2537 group->canon_base_mem,
2538 read_info->mem, rtx_varies_p))
2539 {
2540 if (kill)
2541 bitmap_ior_into (kill, group->group_kill);
2542 bitmap_and_compl_into (gen, group->group_kill);
2543 }
2544 }
2545 }
2546 }
2547
2548 read_info = read_info->next;
2549 }
2550 }
2551
/* Process the READ_INFOs into the bitmaps GEN and KILL.  KILL
   may be NULL.  */
2554
2555 static void
2556 scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
2557 {
2558 while (read_info)
2559 {
2560 if (read_info->alias_set)
2561 {
2562 int index = get_bitmap_index (clear_alias_group,
2563 read_info->alias_set);
2564 if (index != 0)
2565 {
2566 if (kill)
2567 bitmap_set_bit (kill, index);
2568 bitmap_clear_bit (gen, index);
2569 }
2570 }
2571
2572 read_info = read_info->next;
2573 }
2574 }
2575
2576
/* Return the insn in BB_INFO before the first wild read or, if
   there are no wild reads in the block, the last insn.  */
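/* For example, given insns I1 W1 I2 W2 I3 where W1 and W2 are wild
   reads, we return I1: dse_step3_scan can ignore everything from W1
   down, since the wild read hides it from the top of the block.  */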
2579
2580 static insn_info_t
2581 find_insn_before_first_wild_read (bb_info_t bb_info)
2582 {
2583 insn_info_t insn_info = bb_info->last_insn;
2584 insn_info_t last_wild_read = NULL;
2585
2586 while (insn_info)
2587 {
2588 if (insn_info->wild_read)
2589 {
2590 last_wild_read = insn_info->prev_insn;
2591 /* Block starts with wild read. */
2592 if (!last_wild_read)
2593 return NULL;
2594 }
2595
2596 insn_info = insn_info->prev_insn;
2597 }
2598
2599 if (last_wild_read)
2600 return last_wild_read;
2601 else
2602 return bb_info->last_insn;
2603 }
2604
2605
/* Scan the insns in BB backwards to the top of the block in order
   to build the gen and kill sets for the block.  We start either at
   the last insn in the block or at the insn before the first wild
   read.  In the latter case we are able to skip the rest of the
   block because it just does not matter: anything that happens is
   hidden by the wild read.  */
2612
2613 static void
2614 dse_step3_scan (bool for_spills, basic_block bb)
2615 {
2616 bb_info_t bb_info = bb_table[bb->index];
2617 insn_info_t insn_info;
2618
2619 if (for_spills)
2620 /* There are no wild reads in the spill case. */
2621 insn_info = bb_info->last_insn;
2622 else
2623 insn_info = find_insn_before_first_wild_read (bb_info);
2624
/* We need a kill set in the spill case, and in the nospill case
   whenever there is no wild read in the block.  */
2627 if (insn_info == bb_info->last_insn)
2628 {
2629 if (bb_info->kill)
2630 bitmap_clear (bb_info->kill);
2631 else
2632 bb_info->kill = BITMAP_ALLOC (NULL);
2633 }
2634 else
2635 if (bb_info->kill)
2636 BITMAP_FREE (bb_info->kill);
2637
2638 while (insn_info)
2639 {
2640 /* There may have been code deleted by the dce pass run before
2641 this phase. */
2642 if (insn_info->insn && INSN_P (insn_info->insn))
2643 {
2644 /* Process the read(s) last. */
2645 if (for_spills)
2646 {
2647 scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2648 scan_reads_spill (insn_info->read_rec, bb_info->gen, bb_info->kill);
2649 }
2650 else
2651 {
2652 scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
2653 scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
2654 }
2655 }
2656
2657 insn_info = insn_info->prev_insn;
2658 }
2659 }
2660
2661
/* Set the gen set of the exit block.  Blocks with no successors and
   no wild read logically share this set; they pick it up through the
   confluence function.  */
2664
2665 static void
2666 dse_step3_exit_block_scan (bb_info_t bb_info)
2667 {
2668 /* The gen set is all 0's for the exit block except for the
2669 frame_pointer_group. */
2670
2671 if (stores_off_frame_dead_at_return)
2672 {
2673 unsigned int i;
2674 group_info_t group;
2675
2676 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2677 {
2678 if (group->process_globally && group->frame_related)
2679 bitmap_ior_into (bb_info->gen, group->group_kill);
2680 }
2681 }
2682 }
2683
2684
/* Clear the bits in UNREACHABLE_BLOCKS for every block that is
   backwards reachable from BB.  After this has been called for each
   block with no successors (including the exit block), the blocks
   whose bits are still set are the infinite loops or infinite self
   loops: they cannot reach any function exit.  */
2689
2690 static void
2691 mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
2692 {
2693 edge e;
2694 edge_iterator ei;
2695
2696 if (TEST_BIT (unreachable_blocks, bb->index))
2697 {
2698 RESET_BIT (unreachable_blocks, bb->index);
2699 FOR_EACH_EDGE (e, ei, bb->preds)
2700 {
2701 mark_reachable_blocks (unreachable_blocks, e->src);
2702 }
2703 }
2704 }
2705
2706 /* Build the transfer functions for the function. */
2707
2708 static void
2709 dse_step3 (bool for_spills)
2710 {
2711 basic_block bb;
2712 sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
2713 sbitmap_iterator sbi;
2714 bitmap all_ones = NULL;
2715 unsigned int i;
2716
2717 sbitmap_ones (unreachable_blocks);
2718
2719 FOR_ALL_BB (bb)
2720 {
2721 bb_info_t bb_info = bb_table[bb->index];
2722 if (bb_info->gen)
2723 bitmap_clear (bb_info->gen);
2724 else
2725 bb_info->gen = BITMAP_ALLOC (NULL);
2726
2727 if (bb->index == ENTRY_BLOCK)
2728 ;
2729 else if (bb->index == EXIT_BLOCK)
2730 dse_step3_exit_block_scan (bb_info);
2731 else
2732 dse_step3_scan (for_spills, bb);
2733 if (EDGE_COUNT (bb->succs) == 0)
2734 mark_reachable_blocks (unreachable_blocks, bb);
2735
2736 /* If this is the second time dataflow is run, delete the old
2737 sets. */
2738 if (bb_info->in)
2739 BITMAP_FREE (bb_info->in);
2740 if (bb_info->out)
2741 BITMAP_FREE (bb_info->out);
2742 }
2743
2744 /* For any block in an infinite loop, we must initialize the out set
2745 to all ones. This could be expensive, but almost never occurs in
2746 practice. However, it is common in regression tests. */
2747 EXECUTE_IF_SET_IN_SBITMAP (unreachable_blocks, 0, i, sbi)
2748 {
2749 if (bitmap_bit_p (all_blocks, i))
2750 {
2751 bb_info_t bb_info = bb_table[i];
2752 if (!all_ones)
2753 {
2754 unsigned int j;
2755 group_info_t group;
2756
2757 all_ones = BITMAP_ALLOC (NULL);
2758 for (j = 0; VEC_iterate (group_info_t, rtx_group_vec, j, group); j++)
2759 bitmap_ior_into (all_ones, group->group_kill);
2760 }
2761 if (!bb_info->out)
2762 {
2763 bb_info->out = BITMAP_ALLOC (NULL);
2764 bitmap_copy (bb_info->out, all_ones);
2765 }
2766 }
2767 }
2768
2769 if (all_ones)
2770 BITMAP_FREE (all_ones);
2771 sbitmap_free (unreachable_blocks);
2772 }
2773
2774
2775 \f
2776 /*----------------------------------------------------------------------------
2777 Fourth step.
2778
2779 Solve the bitvector equations.
2780 ----------------------------------------------------------------------------*/
2781
2782
2783 /* Confluence function for blocks with no successors. Create an out
2784 set from the gen set of the exit block. This block logically has
2785 the exit block as a successor. */
2786
2787
2788
2789 static void
2790 dse_confluence_0 (basic_block bb)
2791 {
2792 bb_info_t bb_info = bb_table[bb->index];
2793
2794 if (bb->index == EXIT_BLOCK)
2795 return;
2796
2797 if (!bb_info->out)
2798 {
2799 bb_info->out = BITMAP_ALLOC (NULL);
2800 bitmap_copy (bb_info->out, bb_table[EXIT_BLOCK]->gen);
2801 }
2802 }
2803
2804 /* Propagate the information from the in set of the dest of E to the
2805 out set of the src of E. If the various in or out sets are not
2806 there, that means they are all ones. */
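/* I.e. OUT(src) &= IN(dest), where a missing set stands for the
   universal set and so requires no work.  */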
2807
2808 static void
2809 dse_confluence_n (edge e)
2810 {
2811 bb_info_t src_info = bb_table[e->src->index];
2812 bb_info_t dest_info = bb_table[e->dest->index];
2813
2814 if (dest_info->in)
2815 {
2816 if (src_info->out)
2817 bitmap_and_into (src_info->out, dest_info->in);
2818 else
2819 {
2820 src_info->out = BITMAP_ALLOC (NULL);
2821 bitmap_copy (src_info->out, dest_info->in);
2822 }
2823 }
2824 }
2825
2826
2827 /* Propagate the info from the out to the in set of BB_INDEX's basic
2828 block. There are three cases:
2829
1) The block has no kill set.  In this case the kill set is all
   ones.  It does not matter what the out set of the block is; none of
   the info can reach the top.  The only thing that reaches the top is
   the gen set and we just copy the set.
2834
2835 2) There is a kill set but no out set and bb has successors. In
2836 this case we just return. Eventually an out set will be created and
2837 it is better to wait than to create a set of ones.
2838
2839 3) There is both a kill and out set. We apply the obvious transfer
2840 function.
2841 */
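/* In bitmap terms, the case 3 transfer function is
   IN = GEN | (OUT & ~KILL), computed by bitmap_ior_and_compl.  */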
2842
2843 static bool
2844 dse_transfer_function (int bb_index)
2845 {
2846 bb_info_t bb_info = bb_table[bb_index];
2847
2848 if (bb_info->kill)
2849 {
2850 if (bb_info->out)
2851 {
2852 /* Case 3 above. */
2853 if (bb_info->in)
2854 return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2855 bb_info->out, bb_info->kill);
2856 else
2857 {
2858 bb_info->in = BITMAP_ALLOC (NULL);
2859 bitmap_ior_and_compl (bb_info->in, bb_info->gen,
2860 bb_info->out, bb_info->kill);
2861 return true;
2862 }
2863 }
2864 else
2865 /* Case 2 above. */
2866 return false;
2867 }
2868 else
2869 {
2870 /* Case 1 above. If there is already an in set, nothing
2871 happens. */
2872 if (bb_info->in)
2873 return false;
2874 else
2875 {
2876 bb_info->in = BITMAP_ALLOC (NULL);
2877 bitmap_copy (bb_info->in, bb_info->gen);
2878 return true;
2879 }
2880 }
2881 }
2882
2883 /* Solve the dataflow equations. */
2884
2885 static void
2886 dse_step4 (void)
2887 {
2888 df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
2889 dse_confluence_n, dse_transfer_function,
2890 all_blocks, df_get_postorder (DF_BACKWARD),
2891 df_get_n_blocks (DF_BACKWARD));
2892 if (dump_file)
2893 {
2894 basic_block bb;
2895
2896 fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
2897 FOR_ALL_BB (bb)
2898 {
2899 bb_info_t bb_info = bb_table[bb->index];
2900
2901 df_print_bb_index (bb, dump_file);
2902 if (bb_info->in)
2903 bitmap_print (dump_file, bb_info->in, " in: ", "\n");
2904 else
2905 fprintf (dump_file, " in: *MISSING*\n");
2906 if (bb_info->gen)
2907 bitmap_print (dump_file, bb_info->gen, " gen: ", "\n");
2908 else
2909 fprintf (dump_file, " gen: *MISSING*\n");
2910 if (bb_info->kill)
2911 bitmap_print (dump_file, bb_info->kill, " kill: ", "\n");
2912 else
2913 fprintf (dump_file, " kill: *MISSING*\n");
2914 if (bb_info->out)
2915 bitmap_print (dump_file, bb_info->out, " out: ", "\n");
2916 else
2917 fprintf (dump_file, " out: *MISSING*\n\n");
2918 }
2919 }
2920 }
2921
2922
2923 \f
2924 /*----------------------------------------------------------------------------
2925 Fifth step.
2926
2927 Delete the stores that can only be deleted using the global information.
2928 ----------------------------------------------------------------------------*/
2929
2930
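/* Delete the dead stores found by the global analysis (nospill
   case).  Each block is walked backwards starting from its out set;
   an insn is deleted when every byte position it stores is still
   marked dead in the running set V.  */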
2931 static void
2932 dse_step5_nospill (void)
2933 {
2934 basic_block bb;
2935 FOR_EACH_BB (bb)
2936 {
2937 bb_info_t bb_info = bb_table[bb->index];
2938 insn_info_t insn_info = bb_info->last_insn;
2939 bitmap v = bb_info->out;
2940
2941 while (insn_info)
2942 {
2943 bool deleted = false;
2944 if (dump_file && insn_info->insn)
2945 {
2946 fprintf (dump_file, "starting to process insn %d\n",
2947 INSN_UID (insn_info->insn));
2948 bitmap_print (dump_file, v, " v: ", "\n");
2949 }
2950
2951 /* There may have been code deleted by the dce pass run before
2952 this phase. */
2953 if (insn_info->insn
2954 && INSN_P (insn_info->insn)
2955 && (!insn_info->cannot_delete)
2956 && (!bitmap_empty_p (v)))
2957 {
2958 store_info_t store_info = insn_info->store_rec;
2959
2960 /* Try to delete the current insn. */
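/* It can go only if every byte position it stores is in V,
   i.e. is dead below this point: either rewritten before any
   read, or (for frame-related positions) dead at the function
   exit.  */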
2961 deleted = true;
2962
2963 /* Skip the clobbers. */
2964 while (!store_info->is_set)
2965 store_info = store_info->next;
2966
2967 if (store_info->alias_set)
2968 deleted = false;
2969 else
2970 {
2971 HOST_WIDE_INT i;
2972 group_info_t group_info
2973 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
2974
2975 for (i = store_info->begin; i < store_info->end; i++)
2976 {
2977 int index = get_bitmap_index (group_info, i);
2978
2979 if (dump_file)
2980 fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
2981 if (index == 0 || !bitmap_bit_p (v, index))
2982 {
2983 if (dump_file)
2984 fprintf (dump_file, "failing at i = %d\n", (int)i);
2985 deleted = false;
2986 break;
2987 }
2988 }
2989 }
2990 if (deleted)
2991 {
2992 if (dbg_cnt (dse))
2993 {
2994 check_for_inc_dec (insn_info->insn);
2995 delete_insn (insn_info->insn);
2996 insn_info->insn = NULL;
2997 globally_deleted++;
2998 }
2999 }
3000 }
/* We only want to process the local info if the insn was not
   deleted.  For instance, if a deleted insn did a wild read, we
   no longer need to trash the info.  */
3004 if (insn_info->insn
3005 && INSN_P (insn_info->insn)
3006 && (!deleted))
3007 {
3008 scan_stores_nospill (insn_info->store_rec, v, NULL);
3009 if (insn_info->wild_read)
3010 {
3011 if (dump_file)
3012 fprintf (dump_file, "wild read\n");
3013 bitmap_clear (v);
3014 }
3015 else if (insn_info->read_rec)
3016 {
3017 if (dump_file)
3018 fprintf (dump_file, "regular read\n");
3019 scan_reads_nospill (insn_info, v, NULL);
3020 }
3021 }
3022
3023 insn_info = insn_info->prev_insn;
3024 }
3025 }
3026 }
3027
3028
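/* The analogue of dse_step5_nospill for spill slots: an insn is
   deleted only if every one of its stores is to a registered
   singleton alias set whose bitmap index is still set in V.  */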
3029 static void
3030 dse_step5_spill (void)
3031 {
3032 basic_block bb;
3033 FOR_EACH_BB (bb)
3034 {
3035 bb_info_t bb_info = bb_table[bb->index];
3036 insn_info_t insn_info = bb_info->last_insn;
3037 bitmap v = bb_info->out;
3038
3039 while (insn_info)
3040 {
3041 bool deleted = false;
3042 /* There may have been code deleted by the dce pass run before
3043 this phase. */
3044 if (insn_info->insn
3045 && INSN_P (insn_info->insn)
3046 && (!insn_info->cannot_delete)
3047 && (!bitmap_empty_p (v)))
3048 {
3049 /* Try to delete the current insn. */
3050 store_info_t store_info = insn_info->store_rec;
3051 deleted = true;
3052
3053 while (store_info)
3054 {
3055 if (store_info->alias_set)
3056 {
3057 int index = get_bitmap_index (clear_alias_group,
3058 store_info->alias_set);
3059 if (index == 0 || !bitmap_bit_p (v, index))
3060 {
3061 deleted = false;
3062 break;
3063 }
3064 }
3065 else
3066 deleted = false;
3067 store_info = store_info->next;
3068 }
3069 if (deleted && dbg_cnt (dse))
3070 {
3071 if (dump_file)
3072 fprintf (dump_file, "Spill deleting insn %d\n",
3073 INSN_UID (insn_info->insn));
3074 check_for_inc_dec (insn_info->insn);
3075 delete_insn (insn_info->insn);
3076 spill_deleted++;
3077 insn_info->insn = NULL;
3078 }
3079 }
3080
3081 if (insn_info->insn
3082 && INSN_P (insn_info->insn)
3083 && (!deleted))
3084 {
3085 scan_stores_spill (insn_info->store_rec, v, NULL);
3086 scan_reads_spill (insn_info->read_rec, v, NULL);
3087 }
3088
3089 insn_info = insn_info->prev_insn;
3090 }
3091 }
3092 }
3093
3094
3095 \f
3096 /*----------------------------------------------------------------------------
3097 Sixth step.
3098
3099 Destroy everything left standing.
3100 ----------------------------------------------------------------------------*/
3101
3102 static void
3103 dse_step6 (bool global_done)
3104 {
3105 unsigned int i;
3106 group_info_t group;
3107 basic_block bb;
3108
3109 if (global_done)
3110 {
3111 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3112 {
3113 free (group->offset_map_n);
3114 free (group->offset_map_p);
3115 BITMAP_FREE (group->store1_n);
3116 BITMAP_FREE (group->store1_p);
3117 BITMAP_FREE (group->store2_n);
3118 BITMAP_FREE (group->store2_p);
3119 BITMAP_FREE (group->group_kill);
3120 }
3121
3122 FOR_ALL_BB (bb)
3123 {
3124 bb_info_t bb_info = bb_table[bb->index];
3125 BITMAP_FREE (bb_info->gen);
3126 if (bb_info->kill)
3127 BITMAP_FREE (bb_info->kill);
3128 if (bb_info->in)
3129 BITMAP_FREE (bb_info->in);
3130 if (bb_info->out)
3131 BITMAP_FREE (bb_info->out);
3132 }
3133 }
3134 else
3135 {
3136 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
3137 {
3138 BITMAP_FREE (group->store1_n);
3139 BITMAP_FREE (group->store1_p);
3140 BITMAP_FREE (group->store2_n);
3141 BITMAP_FREE (group->store2_p);
3142 BITMAP_FREE (group->group_kill);
3143 }
3144 }
3145
3146 if (clear_alias_sets)
3147 {
3148 BITMAP_FREE (clear_alias_sets);
3149 BITMAP_FREE (disqualified_clear_alias_sets);
3150 free_alloc_pool (clear_alias_mode_pool);
3151 htab_delete (clear_alias_mode_table);
3152 }
3153
3154 end_alias_analysis ();
3155 free (bb_table);
3156 htab_delete (rtx_group_table);
3157 VEC_free (group_info_t, heap, rtx_group_vec);
3158 BITMAP_FREE (all_blocks);
3159 BITMAP_FREE (scratch);
3160
3161 free_alloc_pool (rtx_store_info_pool);
3162 free_alloc_pool (read_info_pool);
3163 free_alloc_pool (insn_info_pool);
3164 free_alloc_pool (bb_info_pool);
3165 free_alloc_pool (rtx_group_info_pool);
3166 free_alloc_pool (deferred_change_pool);
3167 }
3168
3169
3170
3171 /* -------------------------------------------------------------------------
3172 DSE
3173 ------------------------------------------------------------------------- */
3174
3175 /* Callback for running pass_rtl_dse. */
3176
3177 static unsigned int
3178 rest_of_handle_dse (void)
3179 {
3180 bool did_global = false;
3181
3182 df_set_flags (DF_DEFER_INSN_RESCAN);
3183
3184 dse_step0 ();
3185 dse_step1 ();
3186 dse_step2_init ();
3187 if (dse_step2_nospill ())
3188 {
3189 df_set_flags (DF_LR_RUN_DCE);
3190 df_analyze ();
3191 did_global = true;
3192 if (dump_file)
3193 fprintf (dump_file, "doing global processing\n");
3194 dse_step3 (false);
3195 dse_step4 ();
3196 dse_step5_nospill ();
3197 }
3198
/* For the instance of dse that runs after reload, we make a special
   pass to process the spills.  These are special in that they are
   totally transparent, i.e., there are no aliasing issues that need
   to be considered.  This means that the wild reads that kill
   everything else do not apply here.  */
3204 if (clear_alias_sets && dse_step2_spill ())
3205 {
3206 if (!did_global)
3207 {
3208 df_set_flags (DF_LR_RUN_DCE);
3209 df_analyze ();
3210 }
3211 did_global = true;
3212 if (dump_file)
3213 fprintf (dump_file, "doing global spill processing\n");
3214 dse_step3 (true);
3215 dse_step4 ();
3216 dse_step5_spill ();
3217 }
3218
3219 dse_step6 (did_global);
3220
3221 if (dump_file)
3222 fprintf (dump_file, "dse: local deletions = %d, global deletions = %d, spill deletions = %d\n",
3223 locally_deleted, globally_deleted, spill_deleted);
3224 return 0;
3225 }
3226
3227 static bool
3228 gate_dse (void)
3229 {
3230 return optimize > 0 && flag_dse;
3231 }
3232
3233 struct tree_opt_pass pass_rtl_dse1 =
3234 {
3235 "dse1", /* name */
3236 gate_dse, /* gate */
3237 rest_of_handle_dse, /* execute */
3238 NULL, /* sub */
3239 NULL, /* next */
3240 0, /* static_pass_number */
3241 TV_DSE1, /* tv_id */
3242 0, /* properties_required */
3243 0, /* properties_provided */
3244 0, /* properties_destroyed */
3245 0, /* todo_flags_start */
3246 TODO_dump_func |
3247 TODO_df_finish | TODO_verify_rtl_sharing |
3248 TODO_ggc_collect, /* todo_flags_finish */
3249 'w' /* letter */
3250 };
3251
3252 struct tree_opt_pass pass_rtl_dse2 =
3253 {
3254 "dse2", /* name */
3255 gate_dse, /* gate */
3256 rest_of_handle_dse, /* execute */
3257 NULL, /* sub */
3258 NULL, /* next */
3259 0, /* static_pass_number */
3260 TV_DSE2, /* tv_id */
3261 0, /* properties_required */
3262 0, /* properties_provided */
3263 0, /* properties_destroyed */
3264 0, /* todo_flags_start */
3265 TODO_dump_func |
3266 TODO_df_finish | TODO_verify_rtl_sharing |
3267 TODO_ggc_collect, /* todo_flags_finish */
3268 'w' /* letter */
3269 };