/* RTL dead store elimination.
   Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.

   Contributed by Richard Sandiford <rsandifor@codesourcery.com>
   and Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "insn-config.h"
/* This file contains three techniques for performing Dead Store
   Elimination (dse):

   * The first technique performs dse locally on any base address.  It
   is based on cselib, which is a local value numbering technique.
   This technique is local to a basic block but deals with a fairly
   general set of addresses.

   * The second technique performs dse globally but is restricted to
   base addresses that are either constant or are relative to the
   frame pointer.

   * The third technique (which is only done after register allocation)
   processes the spill slots.  This differs from the second
   technique because it takes advantage of the fact that spilling is
   completely free from the effects of aliasing.
   Logically, dse is a backwards dataflow problem.  A store can be
   deleted if it cannot be reached in the backward direction by any
   use of the value being stored.  However, the local technique uses a
   forwards scan of the basic block because cselib requires that the
   block be processed in that order.

   The pass is logically broken into 7 steps:
   1) The local algorithm, as well as scanning the insns for the two
   global algorithms.

   2) Analysis to see if the global algs are necessary.  In the case
   of stores based on a constant address, there must be at least two
   stores to that address, to make it possible to delete some of the
   stores.  In the case of stores off of the frame or spill related
   stores, only one store to an address is necessary because those
   stores die at the end of the function.

   3) Set up the global dataflow equations based on processing the
   info parsed in the first step.

   4) Solve the dataflow equations.

   5) Delete the insns that the global analysis has indicated are
   dead.
   This step uses cselib and canon_rtx to build the largest expression
   possible for each address.  This pass is a forwards pass through
   each basic block.  From the point of view of the global technique,
   the first pass could examine a block in either direction.  The
   forwards ordering is to accommodate cselib.
   We make a simplifying assumption: addresses fall into four broad
   categories:

   1) base has rtx_varies_p == false, offset is constant.
   2) base has rtx_varies_p == false, offset variable.
   3) base has rtx_varies_p == true, offset constant.
   4) base has rtx_varies_p == true, offset variable.

   The local passes are able to process all 4 kinds of addresses.  The
   global pass only handles (1).
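
   As an illustration (made up for this comment, not taken from any
   particular target), these addresses would fall into the four
   categories as follows:

   1) (plus (reg/f fp) (const_int 8))      invariant base, constant offset
   2) (plus (reg/f fp) (reg 100))          invariant base, variable offset
   3) (plus (reg 101) (const_int 8))       varying base, constant offset
   4) (plus (reg 101) (reg 100))           varying base, variable offset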
   The global problem is formulated as follows:

     A store, S1, to address A, where A is not relative to the stack
     frame, can be eliminated if all paths from S1 to the end of the
     function contain another store to A before a read of A.

     If the address A is relative to the stack frame, a store S2 to A
     can be eliminated if there are no paths from S2 that reach the
     end of the function that read A before another store to A.  In
     this case S2 can also be deleted if there are paths from S2 to the
     end of the function that have no reads or writes to A.  This
     second case allows stores to the stack frame to be deleted that
     would otherwise die when the function returns.  This cannot be
     done if stores_off_frame_dead_at_return is not true.  See the doc
     for that variable for when this variable is false.
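
     As an illustrative example (made up for this comment, not from a
     real test case): if every path from

       *p = x;   <- S1
       ...
       *p = y;
       ... = *p;

     reaches the second store to *p before any read of *p, then S1 is
     dead.  If instead the second store were absent and *p were a slot
     in the stack frame that is never read again before the function
     returns, S1 would still be dead under the second rule above.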
   The global problem is formulated as a backwards set union
   dataflow problem where the stores are the gens and reads are the
   kills.  Set union problems are rare and require some special
   handling given our representation of bitmaps.  A straightforward
   implementation requires a lot of bitmaps filled with 1s.
   These are expensive and cumbersome in our bitmap formulation so
   care has been taken to avoid large vectors filled with 1s.  See
   the comments in bb_info and in the dataflow confluence functions
   for details.
   There are two places for further enhancements to this algorithm:

   1) The original dse which was embedded in a pass called flow also
   did local address forwarding.  For example in

   A <- r100
   ... <- A

   flow would replace the right hand side of the second insn with a
   reference to r100.  Most of the information is available to add this
   to this pass.  It has not been done because it is a lot of work in
   the case that either r100 is assigned to between the first and
   second insn and/or the second insn is a load of part of the value
   stored by the first insn.

   insn 5 in gcc.c-torture/compile/990203-1.c simple case.
   insn 15 in gcc.c-torture/execute/20001017-2.c simple case.
   insn 25 in gcc.c-torture/execute/20001026-1.c simple case.
   insn 44 in gcc.c-torture/execute/20010910-1.c simple case.
   2) The cleaning up of spill code is quite profitable.  It currently
   depends on reading tea leaves and chicken entrails left by reload.
   This pass depends on reload creating a singleton alias set for each
   spill slot and telling the next dse pass which of these alias sets
   are the singletons.  Rather than analyze the addresses of the
   spills, dse's spill processing just does analysis of the loads and
   stores that use those alias sets.  There are three cases where this
   falls short:

   a) Reload sometimes creates the slot for one mode of access, and
   then inserts loads and/or stores for a smaller mode.  In this
   case, the current code just punts on the slot.  The proper thing
   to do is to back out and use one bit vector position for each
   byte of the entity associated with the slot.  This depends on
   KNOWING that reload always generates the accesses for each of the
   bytes in some canonical (read that easy to understand several
   passes after reload happens) way.

   b) Reload sometimes decides that the spill slot it allocated was not
   large enough for the mode and goes back and allocates more slots
   with the same mode and alias set.  The backout in this case is a
   little more graceful than (a).  In this case the slot is unmarked
   as being a spill slot and if the final address comes out to be based
   off the frame pointer, the global algorithm handles this slot.

   c) For any pass that may prespill, there is currently no
   mechanism to tell the dse pass that the slot being used has the
   special properties that reload uses.  It may be that all that is
   required is to have those passes make the same calls that reload
   does, assuming that the alias sets can be manipulated in the same
   way.  */
/* There are limits to the size of constant offsets we model for the
   global problem.  There are certainly test cases that exceed this
   limit, however it is unlikely that there are important programs
   that really have constant offsets this size.  */
#define MAX_OFFSET (64 * 1024)
static bitmap scratch = NULL;
/* This structure holds information about a candidate store.  */
struct store_info
{
  /* False means this is a clobber.  */
  bool is_set;

  /* The id of the mem group of the base address.  If rtx_varies_p is
     true, this is -1.  Otherwise, it is the index into the group
     table.  */
  int group_id;

  /* This is the cselib value.  */
  cselib_val *cse_base;

  /* The canonized mem.  */
  rtx mem;

  /* The result of get_addr on mem.  */
  rtx mem_addr;

  /* If this is non-zero, it is the alias set of a spill location.  */
  alias_set_type alias_set;

  /* The offset of the first byte and of the byte just past the last
     byte associated with the operation.  */
  HOST_WIDE_INT begin, end;

  /* A bitmask as wide as the number of bytes in the word that
     contains a 1 if the byte may be needed.  The store is unused if
     all of the bits are 0.  */
  long positions_needed;

  /* The next store info for this insn.  */
  struct store_info *next;

  /* The right hand side of the store.  This is used if there is a
     subsequent reload of the mem's address somewhere later in the
     basic block.  */
  rtx rhs;
};

typedef struct store_info *store_info_t;
static alloc_pool cse_store_info_pool;
static alloc_pool rtx_store_info_pool;
/* This structure holds information about a load.  These are only
   built for rtx bases.  */
struct read_info
{
  /* The id of the mem group of the base address.  */
  int group_id;

  /* If this is non-zero, it is the alias set of a spill location.  */
  alias_set_type alias_set;

  /* The offset of the first byte and of the byte after the last byte
     associated with the operation.  If begin == end == 0, the read
     did not have a constant offset.  */
  int begin, end;

  /* The mem being read.  */
  rtx mem;

  /* The next read_info for this insn.  */
  struct read_info *next;
};

typedef struct read_info *read_info_t;
static alloc_pool read_info_pool;
/* One of these records is created for each insn.  */
struct insn_info
{
  /* Set true if the insn contains a store but the insn itself cannot
     be deleted.  This is set if the insn is a parallel and there is
     more than one non dead output or if the insn is in some way
     volatile.  */
  bool cannot_delete;

  /* This field is only used by the global algorithm.  It is set true
     if the insn contains any read of mem except for a (1).  This is
     also set if the insn is a call or has a clobber mem.  If the insn
     contains a wild read, the use_rec will be null.  */
  bool wild_read;

  /* This field is only used for the processing of const functions.
     These functions cannot read memory, but they can read the stack
     because that is where they may get their parms.  It is set to
     true if the insn may contain a stack pointer based store.  */
  bool stack_pointer_based;

  /* This is true if any of the sets within the store contains a
     cselib base.  Such stores can only be deleted by the local
     algorithm.  */
  bool contains_cselib_groups;

  /* The insn.  */
  rtx insn;

  /* The list of mem sets or mem clobbers that are contained in this
     insn.  If the insn is deletable, it contains only one mem set.
     But it could also contain clobbers.  Insns that contain more than
     one mem set are not deletable, but each of those mems is here in
     order to provide info to delete other insns.  */
  store_info_t store_rec;

  /* The linked list of mem uses in this insn.  Only the reads from
     rtx bases are listed here.  The reads to cselib bases are
     completely processed during the first scan and so are never
     created.  */
  read_info_t read_rec;

  /* The prev insn in the basic block.  */
  struct insn_info * prev_insn;

  /* The linked list of insns that are in consideration for removal in
     the forwards pass through the basic block.  This pointer may be
     trash as it is not cleared when a wild read occurs.  The only
     time it is guaranteed to be correct is when the traversal starts
     at active_local_stores.  */
  struct insn_info * next_local_store;
};

typedef struct insn_info *insn_info_t;
static alloc_pool insn_info_pool;
/* The linked list of stores that are under consideration in this
   basic block.  */
static insn_info_t active_local_stores;
struct bb_info
{
  /* Pointer to the insn info for the last insn in the block.  These
     are linked so this is how all of the insns are reached.  During
     scanning this is the current insn being scanned.  */
  insn_info_t last_insn;

  /* The info for the global dataflow problem.  */

  /* This is set if the transfer function should AND in the wild_read
     bitmap before applying the kill and gen sets.  That vector knocks
     out most of the bits in the bitmap and thus speeds up the
     operations.  */
  bool apply_wild_read;

  /* The set of store positions that exist in this block before a wild read.  */
  bitmap gen;

  /* The set of load positions that exist in this block above the
     same position of a store.  */
  bitmap kill;

  /* The set of stores that reach the top of the block without being
     killed by a read.

     Do not represent the in if it is all ones.  Note that this is
     what the bitvector should logically be initialized to for a set
     intersection problem.  However, like the kill set, this is too
     expensive.  So initially, the in set will only be created for the
     exit block and any block that contains a wild read.  */
  bitmap in;

  /* The set of stores that reach the bottom of the block from its
     successors.

     Do not represent the in if it is all ones.  Note that this is
     what the bitvector should logically be initialized to for a set
     intersection problem.  However, like the kill and in set, this is
     too expensive.  So what is done is that the confluence operator
     just initializes the vector from one of the out sets of the
     successors of the block.  */
  bitmap out;
};

typedef struct bb_info *bb_info_t;
static alloc_pool bb_info_pool;
/* Table to hold all bb_infos.  */
static bb_info_t *bb_table;
/* There is a group_info for each rtx base that is used to reference
   memory.  There are also not many of the rtx bases because they are
   very limited in scope.  */

struct group_info
{
  /* The actual base of the address.  */
  rtx rtx_base;

  /* The sequential id of the base.  This allows us to have a
     canonical ordering of these that is not based on addresses.  */
  int id;

  /* A mem wrapped around the base pointer for the group in order to
     do read dependency.  */
  rtx base_mem;

  /* Canonized version of base_mem, most likely the same thing.  */
  rtx canon_base_mem;

  /* These two sets of two bitmaps are used to keep track of how many
     stores are actually referencing that position from this base.  We
     only do this for rtx bases as this will be used to assign
     positions in the bitmaps for the global problem.  Bit N is set in
     store1 on the first store for offset N.  Bit N is set in store2
     for the second store to offset N.  This is all we need since we
     only care about offsets that have two or more stores for them.

     The "_n" suffix is for offsets less than 0 and the "_p" suffix is
     for 0 and greater offsets.

     There is one special case here, for stores into the stack frame,
     we will OR store1 into store2 before deciding which stores to look
     at globally.  This is because stores to the stack frame that have
     no other reads before the end of the function can also be
     deleted.  */
  bitmap store1_n, store1_p, store2_n, store2_p;
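
  /* As an illustration (made up for this comment): the first 4 byte
     store at offset 8 from this base sets bits 8..11 of store1_p; a
     second store that covers offset 8 then sets bit 8 of store2_p,
     which is what later marks that position as worth tracking
     globally.  */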
  /* The positions in this bitmap have the same assignments as the in,
     out, gen and kill bitmaps.  This bitmap is all zeros except for
     the positions that are occupied by stores for this group.  */
  bitmap group_kill;

  /* True if there are any positions that are to be processed
     globally.  */
  bool process_globally;

  /* True if the base of this group is either the frame_pointer or
     hard_frame_pointer.  */
  bool frame_related;

  /* The offset_map is used to map the offsets from this base into
     positions in the global bitmaps.  It is only created after all of
     the stores have been scanned and we know which ones we care
     about.  */
  int *offset_map_n, *offset_map_p;
  int offset_map_size_n, offset_map_size_p;
};
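
/* For example (an illustrative sketch, not from a real compilation):
   if offsets 8 and 12 from a base each end up with two or more
   stores, dse_step2 could assign offset_map_p[8] = 1 and
   offset_map_p[12] = 2, and those global bit positions are then the
   ones used in the in, out, gen and kill sets.  Position 0 is
   reserved to mean "unused".  */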
typedef struct group_info *group_info_t;
typedef const struct group_info *const_group_info_t;
static alloc_pool rtx_group_info_pool;
/* Tables of group_info structures, hashed by base value.  */
static htab_t rtx_group_table;

/* Index into the rtx_group_vec.  */
static int rtx_group_next_id;

DEF_VEC_P(group_info_t);
DEF_VEC_ALLOC_P(group_info_t,heap);
static VEC(group_info_t,heap) *rtx_group_vec;
/* This structure holds the set of changes that are being deferred
   when removing a read operation.  See replace_read.  */
struct deferred_change
{
  /* The mem that is being replaced.  */
  rtx *loc;

  /* The reg it is being replaced with.  */
  rtx reg;

  struct deferred_change *next;
};

typedef struct deferred_change *deferred_change_t;
static alloc_pool deferred_change_pool;

static deferred_change_t deferred_change_list = NULL;
/* These are used to hold the alias sets of spill variables.  Since
   these are never aliased and there may be a lot of them, it makes
   sense to treat them specially.  This bitvector is only allocated in
   calls from dse_record_singleton_alias_set which currently is only
   made during reload1.  So when dse is called before reload this
   mechanism does nothing.  */
static bitmap clear_alias_sets = NULL;

/* The set of clear_alias_sets that have been disqualified because
   there are loads or stores using a different mode than the alias set
   was registered with.  */
static bitmap disqualified_clear_alias_sets = NULL;

/* The group that holds all of the clear_alias_sets.  */
static group_info_t clear_alias_group;

/* The modes of the clear_alias_sets.  */
static htab_t clear_alias_mode_table;

/* Hash table element to look up the mode for an alias set.  */
struct clear_alias_mode_holder
{
  alias_set_type alias_set;
  enum machine_mode mode;
};

static alloc_pool clear_alias_mode_pool;

/* This is true except for two cases:
   (1) current_function_stdarg -- i.e. we cannot do this
   for vararg functions because they play games with the frame.
   (2) In Ada, it is sometimes not safe to assume that any stores
   based off the stack frame go dead at the exit to a function.  */
static bool stores_off_frame_dead_at_return;

/* Counters for stats.  */
static int globally_deleted;
static int locally_deleted;
static int spill_deleted;

static bitmap all_blocks;

/* The number of bits used in the global bitmaps.  */
static unsigned int current_position;
static bool gate_dse (void);


/*----------------------------------------------------------------------------
   Zeroth step.

   Initialization.
----------------------------------------------------------------------------*/
/* Hashtable callbacks for clear_alias_mode_table, which maps each
   spill alias set to the mode it was registered with.  */

static int
clear_alias_mode_eq (const void *p1, const void *p2)
{
  const struct clear_alias_mode_holder * h1
    = (const struct clear_alias_mode_holder *) p1;
  const struct clear_alias_mode_holder * h2
    = (const struct clear_alias_mode_holder *) p2;
  return h1->alias_set == h2->alias_set;
}


static hashval_t
clear_alias_mode_hash (const void *p)
{
  const struct clear_alias_mode_holder *holder
    = (const struct clear_alias_mode_holder *) p;
  return holder->alias_set;
}
/* Find the entry associated with ALIAS_SET.  */

static struct clear_alias_mode_holder *
clear_alias_set_lookup (alias_set_type alias_set)
{
  struct clear_alias_mode_holder tmp_holder;
  void **slot;

  tmp_holder.alias_set = alias_set;
  slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);

  return (struct clear_alias_mode_holder *) *slot;
}
/* Hashtable callbacks for maintaining the "bases" field of
   store_group_info, given that the addresses are function invariants.  */

static int
invariant_group_base_eq (const void *p1, const void *p2)
{
  const_group_info_t gi1 = (const_group_info_t) p1;
  const_group_info_t gi2 = (const_group_info_t) p2;
  return rtx_equal_p (gi1->rtx_base, gi2->rtx_base);
}


static hashval_t
invariant_group_base_hash (const void *p)
{
  const_group_info_t gi = (const_group_info_t) p;
  int do_not_record;
  return hash_rtx (gi->rtx_base, Pmode, &do_not_record, NULL, false);
}
/* Get the GROUP for BASE.  Add a new group if it is not there.  */

static group_info_t
get_group_info (rtx base)
{
  struct group_info tmp_gi;
  group_info_t gi;
  void **slot;

  if (base)
    {
      /* Find the store_base_info structure for BASE, creating a new one
	 if necessary.  */
      tmp_gi.rtx_base = base;
      slot = htab_find_slot (rtx_group_table, &tmp_gi, INSERT);
      gi = (group_info_t) *slot;
    }
  else
    {
      if (!clear_alias_group)
	{
	  clear_alias_group = gi = pool_alloc (rtx_group_info_pool);
	  memset (gi, 0, sizeof (struct group_info));
	  gi->id = rtx_group_next_id++;
	  gi->store1_n = BITMAP_ALLOC (NULL);
	  gi->store1_p = BITMAP_ALLOC (NULL);
	  gi->store2_n = BITMAP_ALLOC (NULL);
	  gi->store2_p = BITMAP_ALLOC (NULL);
	  gi->group_kill = BITMAP_ALLOC (NULL);
	  gi->process_globally = false;
	  gi->offset_map_size_n = 0;
	  gi->offset_map_size_p = 0;
	  gi->offset_map_n = NULL;
	  gi->offset_map_p = NULL;
	  VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
	}
      return clear_alias_group;
    }

  if (gi == NULL)
    {
      *slot = gi = pool_alloc (rtx_group_info_pool);
      gi->rtx_base = base;
      gi->id = rtx_group_next_id++;
      gi->base_mem = gen_rtx_MEM (QImode, base);
      gi->canon_base_mem = canon_rtx (gi->base_mem);
      gi->store1_n = BITMAP_ALLOC (NULL);
      gi->store1_p = BITMAP_ALLOC (NULL);
      gi->store2_n = BITMAP_ALLOC (NULL);
      gi->store2_p = BITMAP_ALLOC (NULL);
      gi->group_kill = BITMAP_ALLOC (NULL);
      gi->process_globally = false;
      gi->frame_related =
	(base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
      gi->offset_map_size_n = 0;
      gi->offset_map_size_p = 0;
      gi->offset_map_n = NULL;
      gi->offset_map_p = NULL;
      VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
    }

  return gi;
}
/* Initialization of data structures.  */

static void
dse_step0 (void)
{
  locally_deleted = 0;
  globally_deleted = 0;
  spill_deleted = 0;

  scratch = BITMAP_ALLOC (NULL);

  rtx_store_info_pool
    = create_alloc_pool ("rtx_store_info_pool",
			 sizeof (struct store_info), 100);
  read_info_pool
    = create_alloc_pool ("read_info_pool",
			 sizeof (struct read_info), 100);
  insn_info_pool
    = create_alloc_pool ("insn_info_pool",
			 sizeof (struct insn_info), 100);
  bb_info_pool
    = create_alloc_pool ("bb_info_pool",
			 sizeof (struct bb_info), 100);
  rtx_group_info_pool
    = create_alloc_pool ("rtx_group_info_pool",
			 sizeof (struct group_info), 100);
  deferred_change_pool
    = create_alloc_pool ("deferred_change_pool",
			 sizeof (struct deferred_change), 10);

  rtx_group_table = htab_create (11, invariant_group_base_hash,
				 invariant_group_base_eq, NULL);

  bb_table = XCNEWVEC (bb_info_t, last_basic_block);
  rtx_group_next_id = 0;

  stores_off_frame_dead_at_return =
    (!(TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE
       && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))
    && (!current_function_stdarg);

  init_alias_analysis ();

  if (clear_alias_sets)
    clear_alias_group = get_group_info (NULL);
  else
    clear_alias_group = NULL;
}
/*----------------------------------------------------------------------------
   First step.

   Scan all of the insns.  Any random ordering of the blocks is fine.
   Each block is scanned in forward order to accommodate cselib which
   is used to remove stores with non-constant bases.
----------------------------------------------------------------------------*/
/* Delete all of the store_info recs from INSN_INFO.  */

static void
free_store_info (insn_info_t insn_info)
{
  store_info_t store_info = insn_info->store_rec;
  while (store_info)
    {
      store_info_t next = store_info->next;
      if (store_info->cse_base)
	pool_free (cse_store_info_pool, store_info);
      else
	pool_free (rtx_store_info_pool, store_info);
      store_info = next;
    }

  insn_info->cannot_delete = true;
  insn_info->contains_cselib_groups = false;
  insn_info->store_rec = NULL;
}
/* Add an insn to do the add inside X if it is a
   PRE/POST-INC/DEC/MODIFY.  D is a structure containing the insn and
   the size of the mode of the MEM that this is inside of.  */
750 replace_inc_dec (rtx
*r
, void *d
)
753 struct insn_size
*data
= (struct insn_size
*)d
;
754 switch (GET_CODE (x
))
759 rtx r1
= XEXP (x
, 0);
760 rtx c
= gen_int_mode (Pmode
, data
->size
);
761 add_insn_before (data
->insn
,
762 gen_rtx_SET (Pmode
, r1
,
763 gen_rtx_PLUS (Pmode
, r1
, c
)),
771 rtx r1
= XEXP (x
, 0);
772 rtx c
= gen_int_mode (Pmode
, -data
->size
);
773 add_insn_before (data
->insn
,
774 gen_rtx_SET (Pmode
, r1
,
775 gen_rtx_PLUS (Pmode
, r1
, c
)),
783 /* We can reuse the add because we are about to delete the
784 insn that contained it. */
785 rtx add
= XEXP (x
, 0);
786 rtx r1
= XEXP (add
, 0);
787 add_insn_before (data
->insn
,
788 gen_rtx_SET (Pmode
, r1
, add
), NULL
);
798 /* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
799 and generate an add to replace that. */
802 replace_inc_dec_mem (rtx
*r
, void *d
)
805 if (GET_CODE (x
) == MEM
)
807 struct insn_size data
;
809 data
.size
= GET_MODE_SIZE (GET_MODE (x
));
812 for_each_rtx (&XEXP (x
, 0), replace_inc_dec
, &data
);
819 /* Before we delete INSN, make sure that the auto inc/dec, if it is
820 there, is split into a separate insn. */
823 check_for_inc_dec (rtx insn
)
825 rtx note
= find_reg_note (insn
, REG_INC
, NULL_RTX
);
827 for_each_rtx (&insn
, replace_inc_dec_mem
, insn
);
831 /* Delete the insn and free all of the fields inside INSN_INFO. */
834 delete_dead_store_insn (insn_info_t insn_info
)
836 read_info_t read_info
;
841 check_for_inc_dec (insn_info
->insn
);
844 fprintf (dump_file
, "Locally deleting insn %d ",
845 INSN_UID (insn_info
->insn
));
846 if (insn_info
->store_rec
->alias_set
)
847 fprintf (dump_file
, "alias set %d\n",
848 (int) insn_info
->store_rec
->alias_set
);
850 fprintf (dump_file
, "\n");
853 free_store_info (insn_info
);
854 read_info
= insn_info
->read_rec
;
858 read_info_t next
= read_info
->next
;
859 pool_free (read_info_pool
, read_info
);
862 insn_info
->read_rec
= NULL
;
864 delete_insn (insn_info
->insn
);
866 insn_info
->insn
= NULL
;
868 insn_info
->wild_read
= false;
/* Set the store* bitmaps offset_map_size* fields in GROUP based on
   OFFSET and WIDTH.  */

static void
set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
880 if ((offset
> -MAX_OFFSET
) && (offset
< MAX_OFFSET
))
881 for (i
=offset
; i
<offset
+width
; i
++)
888 store1
= group
->store1_n
;
889 store2
= group
->store2_n
;
894 store1
= group
->store1_p
;
895 store2
= group
->store2_p
;
899 if (bitmap_bit_p (store1
, ai
))
900 bitmap_set_bit (store2
, ai
);
903 bitmap_set_bit (store1
, ai
);
906 if (group
->offset_map_size_n
< ai
)
907 group
->offset_map_size_n
= ai
;
911 if (group
->offset_map_size_p
< ai
)
912 group
->offset_map_size_p
= ai
;
919 /* Set the BB_INFO so that the last insn is marked as a wild read. */
922 add_wild_read (bb_info_t bb_info
)
924 insn_info_t insn_info
= bb_info
->last_insn
;
925 read_info_t
*ptr
= &insn_info
->read_rec
;
929 read_info_t next
= (*ptr
)->next
;
930 if ((*ptr
)->alias_set
== 0)
932 pool_free (read_info_pool
, *ptr
);
938 insn_info
->wild_read
= true;
939 active_local_stores
= NULL
;
943 /* Return true if X is a constant or one of the registers that behave
944 as a constant over the life of a function. This is equivalent to
945 !rtx_varies_p for memory addresses. */
948 const_or_frame_p (rtx x
)
950 switch (GET_CODE (x
))
953 return MEM_READONLY_P (x
);
964 /* Note that we have to test for the actual rtx used for the frame
965 and arg pointers and not just the register number in case we have
966 eliminated the frame and/or arg pointer and are using it
968 if (x
== frame_pointer_rtx
|| x
== hard_frame_pointer_rtx
969 /* The arg pointer varies if it is not a fixed register. */
970 || (x
== arg_pointer_rtx
&& fixed_regs
[ARG_POINTER_REGNUM
])
971 || x
== pic_offset_table_rtx
)
/* Take all reasonable action to put the address of MEM into the form
   that we can do analysis on.

   The gold standard is to get the address into the form: address +
   OFFSET where address is something that rtx_varies_p considers a
   constant.  When we can get the address in this form, we can do
   global analysis on it.  Note that for constant bases, address is
   not actually returned, only the group_id.  The address can be
   obtained from it.

   If that fails, we try cselib to get a value we can at least use
   locally.  If that fails we return false.

   The GROUP_ID is set to -1 for cselib bases and the index of the
   group for non_varying bases.

   FOR_READ is true if this is a mem read and false if not.  */
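
/* As an illustrative example (assumed for this comment, not output
   from a real compilation): a frame access such as

     (mem:SI (plus:SI (reg/f:SI fp) (const_int 12)))

   canonicalizes to an invariant base with *OFFSET == 12 and *GROUP_ID
   set to the id of the frame pointer's group, so the global pass can
   track it; an access whose base really varies only gets a cselib
   value and *GROUP_ID == -1, so only the local pass sees it.  */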
999 canon_address (rtx mem
,
1000 alias_set_type
*alias_set_out
,
1002 HOST_WIDE_INT
*offset
,
1005 rtx mem_address
= XEXP (mem
, 0);
1006 rtx expanded_address
, address
;
  /* Make sure that cselib has initialized all of the operands of
     the address before asking it to do the subst.  */
1010 if (clear_alias_sets
)
1012 /* If this is a spill, do not do any further processing. */
1013 alias_set_type alias_set
= MEM_ALIAS_SET (mem
);
1015 fprintf (dump_file
, "found alias set %d\n", (int) alias_set
);
1016 if (bitmap_bit_p (clear_alias_sets
, alias_set
))
1018 struct clear_alias_mode_holder
*entry
1019 = clear_alias_set_lookup (alias_set
);
1021 /* If the modes do not match, we cannot process this set. */
1022 if (entry
->mode
!= GET_MODE (mem
))
1026 "disqualifying alias set %d, (%s) != (%s)\n",
1027 (int) alias_set
, GET_MODE_NAME (entry
->mode
),
1028 GET_MODE_NAME (GET_MODE (mem
)));
1030 bitmap_set_bit (disqualified_clear_alias_sets
, alias_set
);
1034 *alias_set_out
= alias_set
;
1035 *group_id
= clear_alias_group
->id
;
1042 cselib_lookup (mem_address
, Pmode
, 1);
1046 fprintf (dump_file
, " mem: ");
1047 print_inline_rtx (dump_file
, mem_address
, 0);
1048 fprintf (dump_file
, "\n");
1051 /* Use cselib to replace all of the reg references with the full
1052 expression. This will take care of the case where we have
1054 r_x = base + offset;
1059 val = *(base + offset);
1062 expanded_address
= cselib_expand_value_rtx (mem_address
, scratch
, 5);
1064 /* If this fails, just go with the mem_address. */
1065 if (!expanded_address
)
1066 expanded_address
= mem_address
;
1068 /* Split the address into canonical BASE + OFFSET terms. */
1069 address
= canon_rtx (expanded_address
);
1075 fprintf (dump_file
, "\n after cselib_expand address: ");
1076 print_inline_rtx (dump_file
, expanded_address
, 0);
1077 fprintf (dump_file
, "\n");
1079 fprintf (dump_file
, "\n after canon_rtx address: ");
1080 print_inline_rtx (dump_file
, address
, 0);
1081 fprintf (dump_file
, "\n");
1084 if (GET_CODE (address
) == CONST
)
1085 address
= XEXP (address
, 0);
1087 if (GET_CODE (address
) == PLUS
&& GET_CODE (XEXP (address
, 1)) == CONST_INT
)
1089 *offset
= INTVAL (XEXP (address
, 1));
1090 address
= XEXP (address
, 0);
1093 if (const_or_frame_p (address
))
1095 group_info_t group
= get_group_info (address
);
1098 fprintf (dump_file
, " gid=%d offset=%d \n", group
->id
, (int)*offset
);
1100 *group_id
= group
->id
;
1104 *base
= cselib_lookup (address
, Pmode
, true);
1110 fprintf (dump_file
, " no cselib val - should be a wild read.\n");
1114 fprintf (dump_file
, " varying cselib base=%d offset = %d\n",
1115 (*base
)->value
, (int)*offset
);
1121 /* Clear the rhs field from the active_local_stores array. */
1124 clear_rhs_from_active_local_stores (void)
1126 insn_info_t ptr
= active_local_stores
;
1130 store_info_t store_info
= ptr
->store_rec
;
1131 /* Skip the clobbers. */
1132 while (!store_info
->is_set
)
1133 store_info
= store_info
->next
;
1135 store_info
->rhs
= NULL
;
1137 ptr
= ptr
->next_local_store
;
1142 /* BODY is an instruction pattern that belongs to INSN. Return 1 if
1143 there is a candidate store, after adding it to the appropriate
1144 local store group if so. */
1147 record_store (rtx body
, bb_info_t bb_info
)
1150 HOST_WIDE_INT offset
= 0;
1151 HOST_WIDE_INT width
= 0;
1152 alias_set_type spill_alias_set
;
1153 insn_info_t insn_info
= bb_info
->last_insn
;
1154 store_info_t store_info
= NULL
;
1156 cselib_val
*base
= NULL
;
1157 insn_info_t ptr
, last
;
1158 bool store_is_unused
;
1160 if (GET_CODE (body
) != SET
&& GET_CODE (body
) != CLOBBER
)
1163 /* If this is not used, then this cannot be used to keep the insn
1164 from being deleted. On the other hand, it does provide something
1165 that can be used to prove that another store is dead. */
1167 = (find_reg_note (insn_info
->insn
, REG_UNUSED
, body
) != NULL
);
1169 /* Check whether that value is a suitable memory location. */
1170 mem
= SET_DEST (body
);
      /* If the set or clobber is unused, then it does not affect our
	 ability to get rid of the entire insn.  */
1175 if (!store_is_unused
)
1176 insn_info
->cannot_delete
= true;
1180 /* At this point we know mem is a mem. */
1181 if (GET_MODE (mem
) == BLKmode
)
1183 if (GET_CODE (XEXP (mem
, 0)) == SCRATCH
)
1186 fprintf (dump_file
, " adding wild read for (clobber (mem:BLK (scratch))\n");
1187 add_wild_read (bb_info
);
1188 insn_info
->cannot_delete
= true;
1190 else if (!store_is_unused
)
	  /* If the set or clobber is unused, then it does not affect our
	     ability to get rid of the entire insn.  */
1194 insn_info
->cannot_delete
= true;
1195 clear_rhs_from_active_local_stores ();
1200 /* We can still process a volatile mem, we just cannot delete it. */
1201 if (MEM_VOLATILE_P (mem
))
1202 insn_info
->cannot_delete
= true;
1204 if (!canon_address (mem
, &spill_alias_set
, &group_id
, &offset
, &base
))
1206 clear_rhs_from_active_local_stores ();
1210 width
= GET_MODE_SIZE (GET_MODE (mem
));
1212 if (spill_alias_set
)
1214 bitmap store1
= clear_alias_group
->store1_p
;
1215 bitmap store2
= clear_alias_group
->store2_p
;
1217 if (bitmap_bit_p (store1
, spill_alias_set
))
1218 bitmap_set_bit (store2
, spill_alias_set
);
1220 bitmap_set_bit (store1
, spill_alias_set
);
1222 if (clear_alias_group
->offset_map_size_p
< spill_alias_set
)
1223 clear_alias_group
->offset_map_size_p
= spill_alias_set
;
1225 store_info
= pool_alloc (rtx_store_info_pool
);
1228 fprintf (dump_file
, " processing spill store %d(%s)\n",
1229 (int) spill_alias_set
, GET_MODE_NAME (GET_MODE (mem
)));
1231 else if (group_id
>= 0)
1233 /* In the restrictive case where the base is a constant or the
1234 frame pointer we can do global analysis. */
1237 = VEC_index (group_info_t
, rtx_group_vec
, group_id
);
1239 store_info
= pool_alloc (rtx_store_info_pool
);
1240 set_usage_bits (group
, offset
, width
);
1243 fprintf (dump_file
, " processing const base store gid=%d[%d..%d)\n",
1244 group_id
, (int)offset
, (int)(offset
+width
));
1248 rtx base_term
= find_base_term (XEXP (mem
, 0));
1250 || (GET_CODE (base_term
) == ADDRESS
1251 && GET_MODE (base_term
) == Pmode
1252 && XEXP (base_term
, 0) == stack_pointer_rtx
))
1253 insn_info
->stack_pointer_based
= true;
1254 insn_info
->contains_cselib_groups
= true;
1256 store_info
= pool_alloc (cse_store_info_pool
);
1260 fprintf (dump_file
, " processing cselib store [%d..%d)\n",
1261 (int)offset
, (int)(offset
+width
));
  /* Check to see if this store causes some other stores to be
     dead.  */
1266 ptr
= active_local_stores
;
1271 insn_info_t next
= ptr
->next_local_store
;
1272 store_info_t s_info
= ptr
->store_rec
;
      /* Skip the clobbers.  We delete the active insn if this insn
	 shadows the set.  To have been put on the active list, it
	 has exactly one set.  */
1278 while (!s_info
->is_set
)
1279 s_info
= s_info
->next
;
1281 if (s_info
->alias_set
!= spill_alias_set
)
1283 else if (s_info
->alias_set
)
1285 struct clear_alias_mode_holder
*entry
1286 = clear_alias_set_lookup (s_info
->alias_set
);
	  /* Generally, spills cannot be processed if any of the
	     references to the slot have a different mode.  But if
	     we are in the same block and the mode is exactly the same
	     between this store and one before in the same block,
	     we can still delete it.  */
1292 if ((GET_MODE (mem
) == GET_MODE (s_info
->mem
))
1293 && (GET_MODE (mem
) == entry
->mode
))
1296 s_info
->positions_needed
= 0;
1299 fprintf (dump_file
, " trying spill store in insn=%d alias_set=%d\n",
1300 INSN_UID (ptr
->insn
), (int) s_info
->alias_set
);
1302 else if ((s_info
->group_id
== group_id
)
1303 && (s_info
->cse_base
== base
))
1307 fprintf (dump_file
, " trying store in insn=%d gid=%d[%d..%d)\n",
1308 INSN_UID (ptr
->insn
), s_info
->group_id
,
1309 (int)s_info
->begin
, (int)s_info
->end
);
1310 for (i
= offset
; i
< offset
+width
; i
++)
1311 if (i
>= s_info
->begin
&& i
< s_info
->end
)
1312 s_info
->positions_needed
&= ~(1L << (i
- s_info
->begin
));
1314 else if (s_info
->rhs
)
1315 /* Need to see if it is possible for this store to overwrite
1316 the value of store_info. If it is, set the rhs to NULL to
1317 keep it from being used to remove a load. */
1319 if (canon_true_dependence (s_info
->mem
,
1320 GET_MODE (s_info
->mem
),
1326 /* An insn can be deleted if every position of every one of
1327 its s_infos is zero. */
1328 if (s_info
->positions_needed
!= 0)
1333 insn_info_t insn_to_delete
= ptr
;
1336 last
->next_local_store
= ptr
->next_local_store
;
1338 active_local_stores
= ptr
->next_local_store
;
1340 delete_dead_store_insn (insn_to_delete
);
1348 gcc_assert ((unsigned) width
< sizeof (store_info
->positions_needed
) * CHAR_BIT
);
1350 /* Finish filling in the store_info. */
1351 store_info
->next
= insn_info
->store_rec
;
1352 insn_info
->store_rec
= store_info
;
1353 store_info
->mem
= canon_rtx (mem
);
1354 store_info
->alias_set
= spill_alias_set
;
1355 store_info
->mem_addr
= get_addr (XEXP (mem
, 0));
1356 store_info
->cse_base
= base
;
1357 store_info
->positions_needed
= (1L << width
) - 1;
1358 store_info
->group_id
= group_id
;
1359 store_info
->begin
= offset
;
1360 store_info
->end
= offset
+ width
;
1361 store_info
->is_set
= GET_CODE (body
) == SET
;
1363 if (store_info
->is_set
1364 /* No place to keep the value after ra. */
1365 && !reload_completed
      /* The careful reviewer may wish to comment on my checking that the
	 rhs of a store is always a reg.  */
1368 && REG_P (SET_SRC (body
))
1369 /* Sometimes the store and reload is used for truncation and
1371 && !(FLOAT_MODE_P (GET_MODE (mem
)) && (flag_float_store
)))
1372 store_info
->rhs
= SET_SRC (body
);
1374 store_info
->rhs
= NULL
;
  /* If this is a clobber, we return 0.  We will only be able to
     delete this insn if there is only one USED store, but we
     can use the clobber to delete other stores earlier.  */
1379 return store_info
->is_set
? 1 : 0;
1384 dump_insn_info (const char * start
, insn_info_t insn_info
)
1386 fprintf (dump_file
, "%s insn=%d %s\n", start
,
1387 INSN_UID (insn_info
->insn
),
1388 insn_info
->store_rec
? "has store" : "naked");
/* If the modes are different and the value's source and target do not
   line up, we need to extract the value from the lower part of the rhs of
   the store, shift it, and then put it into a form that can be shoved
   into the read_insn.  This function generates a right SHIFT of a
   value that is at least ACCESS_SIZE bytes wide of READ_MODE.  The
   shift sequence is returned or NULL if we failed to find a
   shift.  */
1401 find_shift_sequence (rtx read_reg
,
1403 store_info_t store_info
,
1404 read_info_t read_info
,
1407 enum machine_mode store_mode
= GET_MODE (store_info
->mem
);
1408 enum machine_mode read_mode
= GET_MODE (read_info
->mem
);
1409 rtx chosen_seq
= NULL
;
1411 /* Some machines like the x86 have shift insns for each size of
1412 operand. Other machines like the ppc or the ia-64 may only have
1413 shift insns that shift values within 32 or 64 bit registers.
1414 This loop tries to find the smallest shift insn that will right
1415 justify the value we want to read but is available in one insn on
1418 for (; access_size
< UNITS_PER_WORD
; access_size
*= 2)
1420 rtx target
, new_reg
, shift_seq
, insn
;
1421 enum machine_mode new_mode
;
1424 /* Try a wider mode if truncating the store mode to ACCESS_SIZE
1425 bytes requires a real instruction. */
1426 if (access_size
< GET_MODE_SIZE (store_mode
)
1427 && !TRULY_NOOP_TRUNCATION (access_size
* BITS_PER_UNIT
,
1428 GET_MODE_BITSIZE (store_mode
)))
1431 new_mode
= smallest_mode_for_size (access_size
* BITS_PER_UNIT
,
1432 GET_MODE_CLASS (read_mode
));
1433 new_reg
= gen_reg_rtx (new_mode
);
1437 /* In theory we could also check for an ashr. Ian Taylor knows
1438 of one dsp where the cost of these two was not the same. But
1439 this really is a rare case anyway. */
1440 target
= expand_binop (new_mode
, lshr_optab
, new_reg
,
1441 GEN_INT (shift
), new_reg
, 1, OPTAB_DIRECT
);
1443 shift_seq
= get_insns ();
1446 if (target
!= new_reg
|| shift_seq
== NULL
)
1450 for (insn
= shift_seq
; insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
1452 cost
+= insn_rtx_cost (PATTERN (insn
));
1454 /* The computation up to here is essentially independent
1455 of the arguments and could be precomputed. It may
1456 not be worth doing so. We could precompute if
1457 worthwhile or at least cache the results. The result
1458 technically depends on SHIFT, ACCESS_SIZE, and
1459 GET_MODE_CLASS (READ_MODE). But in practice the
1460 answer will depend only on ACCESS_SIZE. */
1462 if (cost
> COSTS_N_INSNS (1))
1465 /* We found an acceptable shift. Generate a move to
1466 take the value from the store and put it into the
1467 shift pseudo, then shift it, then generate another
1468 move to put in into the target of the read. */
1470 emit_move_insn (new_reg
, gen_lowpart (new_mode
, store_info
->rhs
));
1471 emit_insn (shift_seq
);
1472 convert_move (read_reg
, new_reg
, 1);
1476 fprintf (dump_file
, " -- adding extract insn r%d:%s = r%d:%s\n",
1477 REGNO (new_reg
), GET_MODE_NAME (new_mode
),
1478 REGNO (store_info
->rhs
), GET_MODE_NAME (store_mode
));
1480 fprintf (dump_file
, " -- with shift of r%d by %d\n",
1481 REGNO(new_reg
), shift
);
1482 fprintf (dump_file
, " -- and second extract insn r%d:%s = r%d:%s\n",
1483 REGNO (read_reg
), GET_MODE_NAME (read_mode
),
1484 REGNO (new_reg
), GET_MODE_NAME (new_mode
));
1487 /* Get the three insn sequence and return it. */
1488 chosen_seq
= get_insns ();
1497 /* Take a sequence of:
1520 Depending on the alignment and the mode of the store and
1524 The STORE_INFO and STORE_INSN are for the store and READ_INFO
1525 and READ_INSN are for the read. Return true if the replacement
1529 replace_read (store_info_t store_info
, insn_info_t store_insn
,
1530 read_info_t read_info
, insn_info_t read_insn
, rtx
*loc
)
1532 enum machine_mode store_mode
= GET_MODE (store_info
->mem
);
1533 enum machine_mode read_mode
= GET_MODE (read_info
->mem
);
1535 int access_size
; /* In bytes. */
1536 rtx read_reg
= gen_reg_rtx (read_mode
);
1537 rtx shift_seq
= NULL
;
1542 if (GET_MODE_CLASS (read_mode
) != GET_MODE_CLASS (store_mode
))
  /* To get here the read is within the boundaries of the write so
     shift will never be negative.  Start out with the shift being in
     bytes.  */
  if (BYTES_BIG_ENDIAN)
    shift = store_info->end - read_info->end;
  else
    shift = read_info->begin - store_info->begin;

  access_size = shift + GET_MODE_SIZE (read_mode);

  /* From now on it is bits.  */
  shift *= BITS_PER_UNIT;
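
  /* As a worked illustration (assumed values, not from a real target):
     on a little endian machine, a 4 byte SImode store covering bytes
     [0,4) followed by an HImode read of bytes [2,4) gives
     shift = 2 - 0 = 2 bytes and access_size = 2 + 2 = 4; after the
     conversion above, shift = 16 bits, i.e. the read can be satisfied
     by shifting the stored value right by 16 bits.  */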
1558 /* We need to keep this in perspective. We are replacing a read
1559 with a sequence of insns, but the read will almost certainly be
1560 in cache, so it is not going to be an expensive one. Thus, we
1561 are not willing to do a multi insn shift or worse a subroutine
1562 call to get rid of the read. */
1565 if (access_size
> UNITS_PER_WORD
|| FLOAT_MODE_P (store_mode
))
1568 shift_seq
= find_shift_sequence (read_reg
, access_size
, store_info
,
1575 fprintf (dump_file
, "replacing load at %d from store at %d\n",
1576 INSN_UID (read_insn
->insn
), INSN_UID (store_insn
->insn
));
1578 if (validate_change (read_insn
->insn
, loc
, read_reg
, 0))
1581 deferred_change_t deferred_change
= pool_alloc (deferred_change_pool
);
1583 if (read_mode
== store_mode
)
1587 /* The modes are the same and everything lines up. Just
1588 generate a simple move. */
1589 emit_move_insn (read_reg
, store_info
->rhs
);
1591 fprintf (dump_file
, " -- adding move insn r%d = r%d\n",
1592 REGNO (read_reg
), REGNO (store_info
->rhs
));
1593 insns
= get_insns ();
	  /* The modes are different but the lsbs are in the same
	     place; we need to extract the value on the right from the
	     rhs of the store.  */
1604 convert_move (read_reg
, store_info
->rhs
, 1);
1607 fprintf (dump_file
, " -- adding extract insn r%d:%s = r%d:%s\n",
1608 REGNO (read_reg
), GET_MODE_NAME (read_mode
),
1609 REGNO (store_info
->rhs
), GET_MODE_NAME (store_mode
));
1610 insns
= get_insns ();
1614 /* Insert this right before the store insn where it will be safe
1615 from later insns that might change it before the read. */
1616 emit_insn_before (insns
, store_insn
->insn
);
1618 /* And now for the kludge part: cselib croaks if you just
1619 return at this point. There are two reasons for this:
1621 1) Cselib has an idea of how many pseudos there are and
1622 that does not include the new ones we just added.
1624 2) Cselib does not know about the move insn we added
1625 above the store_info, and there is no way to tell it
1626 about it, because it has "moved on".
1628 Problem (1) is fixable with a certain amount of engineering.
	 Problem (2) requires starting the bb from scratch.  This
1632 So we are just going to have to lie. The move/extraction
1633 insns are not really an issue, cselib did not see them. But
1634 the use of the new pseudo read_insn is a real problem because
1635 cselib has not scanned this insn. The way that we solve this
1636 problem is that we are just going to put the mem back for now
1637 and when we are finished with the block, we undo this. We
1638 keep a table of mems to get rid of. At the end of the basic
1639 block we can put them back. */
1641 *loc
= read_info
->mem
;
1642 deferred_change
->next
= deferred_change_list
;
1643 deferred_change_list
= deferred_change
;
1644 deferred_change
->loc
= loc
;
1645 deferred_change
->reg
= read_reg
;
1647 /* Get rid of the read_info, from the point of view of the
1648 rest of dse, play like this read never happened. */
1649 read_insn
->read_rec
= read_info
->next
;
1650 pool_free (read_info_pool
, read_info
);
1656 fprintf (dump_file
, " -- validation failure\n");
1661 /* A for_each_rtx callback in which DATA is the bb_info. Check to see
1662 if LOC is a mem and if it is look at the address and kill any
1663 appropriate stores that may be active. */
1666 check_mem_read_rtx (rtx
*loc
, void *data
)
1670 insn_info_t insn_info
;
1671 HOST_WIDE_INT offset
= 0;
1672 HOST_WIDE_INT width
= 0;
1673 alias_set_type spill_alias_set
= 0;
1674 cselib_val
*base
= NULL
;
1676 read_info_t read_info
;
1678 if (!mem
|| !MEM_P (mem
))
1681 bb_info
= (bb_info_t
) data
;
1682 insn_info
= bb_info
->last_insn
;
1684 if ((MEM_ALIAS_SET (mem
) == ALIAS_SET_MEMORY_BARRIER
)
1685 || (MEM_VOLATILE_P (mem
)))
1688 fprintf (dump_file
, " adding wild read, volatile or barrier.\n");
1689 add_wild_read (bb_info
);
1690 insn_info
->cannot_delete
= true;
1694 /* If it is reading readonly mem, then there can be no conflict with
1696 if (MEM_READONLY_P (mem
))
1699 if (!canon_address (mem
, &spill_alias_set
, &group_id
, &offset
, &base
))
1702 fprintf (dump_file
, " adding wild read, canon_address failure.\n");
1703 add_wild_read (bb_info
);
1707 if (GET_MODE (mem
) == BLKmode
)
1710 width
= GET_MODE_SIZE (GET_MODE (mem
));
1712 read_info
= pool_alloc (read_info_pool
);
1713 read_info
->group_id
= group_id
;
1714 read_info
->mem
= mem
;
1715 read_info
->alias_set
= spill_alias_set
;
1716 read_info
->begin
= offset
;
1717 read_info
->end
= offset
+ width
;
1718 read_info
->next
= insn_info
->read_rec
;
1719 insn_info
->read_rec
= read_info
;
  /* We ignore the clobbers in store_info.  This is mildly aggressive,
     but there really should not be a clobber followed by a read.  */
1724 if (spill_alias_set
)
1726 insn_info_t i_ptr
= active_local_stores
;
1727 insn_info_t last
= NULL
;
1730 fprintf (dump_file
, " processing spill load %d\n",
1731 (int) spill_alias_set
);
1735 store_info_t store_info
= i_ptr
->store_rec
;
1737 /* Skip the clobbers. */
1738 while (!store_info
->is_set
)
1739 store_info
= store_info
->next
;
1741 if (store_info
->alias_set
== spill_alias_set
)
1744 dump_insn_info ("removing from active", i_ptr
);
1747 last
->next_local_store
= i_ptr
->next_local_store
;
1749 active_local_stores
= i_ptr
->next_local_store
;
1753 i_ptr
= i_ptr
->next_local_store
;
1756 else if (group_id
>= 0)
1758 /* This is the restricted case where the base is a constant or
1759 the frame pointer and offset is a constant. */
1760 insn_info_t i_ptr
= active_local_stores
;
1761 insn_info_t last
= NULL
;
1766 fprintf (dump_file
, " processing const load gid=%d[BLK]\n",
1769 fprintf (dump_file
, " processing const load gid=%d[%d..%d)\n",
1770 group_id
, (int)offset
, (int)(offset
+width
));
1775 bool remove
= false;
1776 store_info_t store_info
= i_ptr
->store_rec
;
1778 /* Skip the clobbers. */
1779 while (!store_info
->is_set
)
1780 store_info
= store_info
->next
;
1782 /* There are three cases here. */
1783 if (store_info
->group_id
< 0)
1784 /* We have a cselib store followed by a read from a
1787 = canon_true_dependence (store_info
->mem
,
1788 GET_MODE (store_info
->mem
),
1789 store_info
->mem_addr
,
1792 else if (group_id
== store_info
->group_id
)
1794 /* This is a block mode load. We may get lucky and
1795 canon_true_dependence may save the day. */
1798 = canon_true_dependence (store_info
->mem
,
1799 GET_MODE (store_info
->mem
),
1800 store_info
->mem_addr
,
1803 /* If this read is just reading back something that we just
1804 stored, rewrite the read. */
1808 && (offset
>= store_info
->begin
)
1809 && (offset
+ width
<= store_info
->end
))
1811 int mask
= ((1L << width
) - 1) << (offset
- store_info
->begin
);
1813 if ((store_info
->positions_needed
& mask
) == mask
1814 && replace_read (store_info
, i_ptr
,
1815 read_info
, insn_info
, loc
))
1818 /* The bases are the same, just see if the offsets
1820 if ((offset
< store_info
->end
)
1821 && (offset
+ width
> store_info
->begin
))
1827 The else case that is missing here is that the
1828 bases are constant but different. There is nothing
1829 to do here because there is no overlap. */
1834 dump_insn_info ("removing from active", i_ptr
);
1837 last
->next_local_store
= i_ptr
->next_local_store
;
1839 active_local_stores
= i_ptr
->next_local_store
;
1843 i_ptr
= i_ptr
->next_local_store
;
1848 insn_info_t i_ptr
= active_local_stores
;
1849 insn_info_t last
= NULL
;
1852 fprintf (dump_file
, " processing cselib load mem:");
1853 print_inline_rtx (dump_file
, mem
, 0);
1854 fprintf (dump_file
, "\n");
1859 bool remove
= false;
1860 store_info_t store_info
= i_ptr
->store_rec
;
1863 fprintf (dump_file
, " processing cselib load against insn %d\n",
1864 INSN_UID (i_ptr
->insn
));
1866 /* Skip the clobbers. */
1867 while (!store_info
->is_set
)
1868 store_info
= store_info
->next
;
1870 /* If this read is just reading back something that we just
1871 stored, rewrite the read. */
1873 && store_info
->group_id
== -1
1874 && store_info
->cse_base
== base
1875 && (offset
>= store_info
->begin
)
1876 && (offset
+ width
<= store_info
->end
))
1878 int mask
= ((1L << width
) - 1) << (offset
- store_info
->begin
);
1880 if ((store_info
->positions_needed
& mask
) == mask
1881 && replace_read (store_info
, i_ptr
,
1882 read_info
, insn_info
, loc
))
1886 if (!store_info
->alias_set
)
1887 remove
= canon_true_dependence (store_info
->mem
,
1888 GET_MODE (store_info
->mem
),
1889 store_info
->mem_addr
,
1895 dump_insn_info ("removing from active", i_ptr
);
1898 last
->next_local_store
= i_ptr
->next_local_store
;
1900 active_local_stores
= i_ptr
->next_local_store
;
1904 i_ptr
= i_ptr
->next_local_store
;
/* A note_uses callback in which DATA points to the same bb_info that
   check_mem_read_rtx expects.  Apply check_mem_read_rtx to every part
   of *LOC.  */
1915 check_mem_read_use (rtx
*loc
, void *data
)
1917 for_each_rtx (loc
, check_mem_read_rtx
, data
);
1920 /* Apply record_store to all candidate stores in INSN. Mark INSN
1921 if some part of it is not a candidate store and assigns to a
1922 non-register target. */
1925 scan_insn (bb_info_t bb_info
, rtx insn
)
1928 insn_info_t insn_info
= pool_alloc (insn_info_pool
);
1930 memset (insn_info
, 0, sizeof (struct insn_info
));
1933 fprintf (dump_file
, "\n**scanning insn=%d\n",
1936 insn_info
->prev_insn
= bb_info
->last_insn
;
1937 insn_info
->insn
= insn
;
1938 bb_info
->last_insn
= insn_info
;
1941 /* Cselib clears the table for this case, so we have to essentially
1943 if (NONJUMP_INSN_P (insn
)
1944 && GET_CODE (PATTERN (insn
)) == ASM_OPERANDS
1945 && MEM_VOLATILE_P (PATTERN (insn
)))
1947 add_wild_read (bb_info
);
1948 insn_info
->cannot_delete
= true;
1952 /* Look at all of the uses in the insn. */
1953 note_uses (&PATTERN (insn
), check_mem_read_use
, bb_info
);
1957 insn_info
->cannot_delete
= true;
      /* Const functions cannot do anything bad i.e. read memory;
	 however, they can read their parameters which may have
	 been pushed onto the stack.  */
1962 if (CONST_OR_PURE_CALL_P (insn
) && !pure_call_p (insn
))
1964 insn_info_t i_ptr
= active_local_stores
;
1965 insn_info_t last
= NULL
;
1968 fprintf (dump_file
, "const call %d\n", INSN_UID (insn
));
1972 /* Remove the stack pointer based stores. */
1973 if (i_ptr
->stack_pointer_based
)
1976 dump_insn_info ("removing from active", i_ptr
);
1979 last
->next_local_store
= i_ptr
->next_local_store
;
1981 active_local_stores
= i_ptr
->next_local_store
;
1985 i_ptr
= i_ptr
->next_local_store
;
1990 /* Every other call, including pure functions, may read memory. */
1991 add_wild_read (bb_info
);
1996 /* Assuming that there are sets in these insns, we cannot delete
1998 if ((GET_CODE (PATTERN (insn
)) == CLOBBER
)
1999 || volatile_refs_p (PATTERN (insn
))
2000 || (flag_non_call_exceptions
&& may_trap_p (PATTERN (insn
)))
2001 || (RTX_FRAME_RELATED_P (insn
))
2002 || find_reg_note (insn
, REG_FRAME_RELATED_EXPR
, NULL_RTX
))
2003 insn_info
->cannot_delete
= true;
2005 body
= PATTERN (insn
);
2006 if (GET_CODE (body
) == PARALLEL
)
2009 for (i
= 0; i
< XVECLEN (body
, 0); i
++)
2010 mems_found
+= record_store (XVECEXP (body
, 0, i
), bb_info
);
2013 mems_found
+= record_store (body
, bb_info
);
2016 fprintf (dump_file
, "mems_found = %d, cannot_delete = %s\n",
2017 mems_found
, insn_info
->cannot_delete
? "true" : "false");
2019 /* If we found some sets of mems, and the insn has not been marked
2020 cannot delete, add it into the active_local_stores so that it can
2021 be locally deleted if found dead. Otherwise mark it as cannot
2022 delete. This simplifies the processing later. */
2023 if (mems_found
== 1 && !insn_info
->cannot_delete
)
2025 insn_info
->next_local_store
= active_local_stores
;
2026 active_local_stores
= insn_info
;
2029 insn_info
->cannot_delete
= true;
2033 /* Remove BASE from the set of active_local_stores. This is a
2034 callback from cselib that is used to get rid of the stores in
2035 active_local_stores. */
2038 remove_useless_values (cselib_val
*base
)
2040 insn_info_t insn_info
= active_local_stores
;
2041 insn_info_t last
= NULL
;
2045 store_info_t store_info
= insn_info
->store_rec
;
2046 bool delete = false;
      /* If ANY of the store_infos match the cselib group that is
	 being deleted, then the insn cannot be deleted.  */
2052 if ((store_info
->group_id
== -1)
2053 && (store_info
->cse_base
== base
))
2058 store_info
= store_info
->next
;
2064 last
->next_local_store
= insn_info
->next_local_store
;
2066 active_local_stores
= insn_info
->next_local_store
;
2067 free_store_info (insn_info
);
2072 insn_info
= insn_info
->next_local_store
;
2077 /* Do all of step 1. */
2084 cselib_init (false);
2085 all_blocks
= BITMAP_ALLOC (NULL
);
2086 bitmap_set_bit (all_blocks
, ENTRY_BLOCK
);
2087 bitmap_set_bit (all_blocks
, EXIT_BLOCK
);
2092 bb_info_t bb_info
= pool_alloc (bb_info_pool
);
2094 memset (bb_info
, 0, sizeof (struct bb_info
));
2095 bitmap_set_bit (all_blocks
, bb
->index
);
2097 bb_table
[bb
->index
] = bb_info
;
2098 cselib_discard_hook
= remove_useless_values
;
2100 if (bb
->index
>= NUM_FIXED_BLOCKS
)
2105 = create_alloc_pool ("cse_store_info_pool",
2106 sizeof (struct store_info
), 100);
2107 active_local_stores
= NULL
;
2108 cselib_clear_table ();
2110 /* Scan the insns. */
2111 FOR_BB_INSNS (bb
, insn
)
2114 scan_insn (bb_info
, insn
);
2115 cselib_process_insn (insn
);
      /* This is something of a hack, because the global algorithm
	 is supposed to take care of the case where stores go dead
	 at the end of the function.  However, the global
	 algorithm must take a more conservative view of block
	 mode reads than the local alg does.  So to get the case
	 where you have a store to the frame followed by a non
	 overlapping block mode read, we look at the active local
	 stores at the end of the function and delete all of the
	 frame and spill based ones.  */
2127 if (stores_off_frame_dead_at_return
2128 && (EDGE_COUNT (bb
->succs
) == 0
2129 || (single_succ_p (bb
)
2130 && single_succ (bb
) == EXIT_BLOCK_PTR
2131 && ! current_function_calls_eh_return
)))
2133 insn_info_t i_ptr
= active_local_stores
;
2136 store_info_t store_info
= i_ptr
->store_rec
;
2138 /* Skip the clobbers. */
2139 while (!store_info
->is_set
)
2140 store_info
= store_info
->next
;
2141 if (store_info
->alias_set
)
2142 delete_dead_store_insn (i_ptr
);
2144 if (store_info
->group_id
>= 0)
2147 = VEC_index (group_info_t
, rtx_group_vec
, store_info
->group_id
);
2148 if (group
->frame_related
)
2149 delete_dead_store_insn (i_ptr
);
2152 i_ptr
= i_ptr
->next_local_store
;
2156 /* Get rid of the loads that were discovered in
2157 replace_read. Cselib is finished with this block. */
2158 while (deferred_change_list
)
2160 deferred_change_t next
= deferred_change_list
->next
;
2162 /* There is no reason to validate this change. That was
2164 *deferred_change_list
->loc
= deferred_change_list
->reg
;
2165 pool_free (deferred_change_pool
, deferred_change_list
);
2166 deferred_change_list
= next
;
2169 /* Get rid of all of the cselib based store_infos in this
2170 block and mark the containing insns as not being
2172 ptr
= bb_info
->last_insn
;
2175 if (ptr
->contains_cselib_groups
)
2176 free_store_info (ptr
);
2177 ptr
= ptr
->prev_insn
;
2180 free_alloc_pool (cse_store_info_pool
);
2185 htab_empty (rtx_group_table
);
/*----------------------------------------------------------------------------
   Second step.

   Assign each byte position in the stores that we are going to
   analyze globally to a position in the bitmaps.  Returns true if
   there are any bit positions assigned.
----------------------------------------------------------------------------*/

static void
dse_step2_init (void)
{
  unsigned int i;
  group_info_t group;

  for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
    {
      /* For all non stack related bases, we only consider a store to
         be deletable if there are two or more stores for that
         position.  This is because it takes one store to make the
         other store redundant.  However, for the stores that are
         stack related, we consider them if there is only one store
         for the position.  We do this because the stack related
         stores can be deleted if there is no read between them and
         the end of the function.

         To make this work in the current framework, we take the stack
         related bases and add all of the bits from store1 into store2.
         This has the effect of making them eligible even if there is
         only one store.  */

      if (stores_off_frame_dead_at_return && group->frame_related)
        {
          bitmap_ior_into (group->store2_n, group->store1_n);
          bitmap_ior_into (group->store2_p, group->store1_p);
          if (dump_file)
            fprintf (dump_file, "group %d is frame related ", i);
        }

      group->offset_map_size_n++;
      group->offset_map_n = XNEWVEC (int, group->offset_map_size_n);
      group->offset_map_size_p++;
      group->offset_map_p = XNEWVEC (int, group->offset_map_size_p);
      group->process_globally = false;
      if (dump_file)
        {
          fprintf (dump_file, "group %d(%d+%d): ", i,
                   (int)bitmap_count_bits (group->store2_n),
                   (int)bitmap_count_bits (group->store2_p));
          bitmap_print (dump_file, group->store2_n, "n ", " ");
          bitmap_print (dump_file, group->store2_p, "p ", "\n");
        }
    }
}

/* Init the offset tables for the normal case.  */

static bool
dse_step2_nospill (void)
{
  unsigned int i;
  group_info_t group;

  /* Position 0 is unused because 0 is used in the maps to mean
     unused.  */
  current_position = 1;

  for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
    {
      bitmap_iterator bi;
      unsigned int j;

      if (group == clear_alias_group)
        continue;

      memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
      memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
      bitmap_clear (group->group_kill);

      EXECUTE_IF_SET_IN_BITMAP (group->store2_n, 0, j, bi)
        {
          bitmap_set_bit (group->group_kill, current_position);
          group->offset_map_n[j] = current_position++;
          group->process_globally = true;
        }
      EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
        {
          bitmap_set_bit (group->group_kill, current_position);
          group->offset_map_p[j] = current_position++;
          group->process_globally = true;
        }
    }

  return current_position != 1;
}

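/* A purely illustrative example of the mapping built above (the
   numbers are hypothetical): if a group's store2_p has bits 0, 4 and 8
   set and its store2_n is empty, the loop assigns offset_map_p[0] = 1,
   offset_map_p[4] = 2 and offset_map_p[8] = 3, sets bits 1, 2 and 3 in
   group_kill, and leaves current_position at 4 so the next group's
   offsets start there.  */
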
/* Init the offset tables for the spill case.  */

static bool
dse_step2_spill (void)
{
  unsigned int j;
  group_info_t group = clear_alias_group;
  bitmap_iterator bi;

  /* Position 0 is unused because 0 is used in the maps to mean
     unused.  */
  current_position = 1;

  if (dump_file)
    {
      bitmap_print (dump_file, clear_alias_sets,
                    "clear alias sets ", "\n");
      bitmap_print (dump_file, disqualified_clear_alias_sets,
                    "disqualified clear alias sets ", "\n");
    }

  memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
  memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
  bitmap_clear (group->group_kill);

  /* Remove the disqualified positions from the store2_p set.  */
  bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);

  /* We do not need to process the store2_n set because
     alias_sets are always positive.  */
  EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
    {
      bitmap_set_bit (group->group_kill, current_position);
      group->offset_map_p[j] = current_position++;
      group->process_globally = true;
    }

  return current_position != 1;
}

/*----------------------------------------------------------------------------
   Third step.

   Build the bit vectors for the transfer functions.
----------------------------------------------------------------------------*/


/* Note that this is NOT a general purpose function.  Any mem that has
   an alias set registered here is expected to be COMPLETELY unaliased:
   i.e. its addresses are not and need not be examined.

   It is known that all references to this address will have this
   alias set and there are NO other references to this address in the
   function.

   Currently the only place that is known to be clean enough to use
   this interface is the code that assigns the spill locations.

   All of the mems that have alias_sets registered are subjected to a
   very powerful form of dse where function calls, volatile reads and
   writes, and reads from random locations are not taken into account.

   It is also assumed that these locations go dead when the function
   returns.  This assumption could be relaxed if there were found to
   be places that this assumption was not correct.

   The MODE is passed in and saved.  The mode of each load or store to
   a mem with ALIAS_SET is checked against MEM.  If the size of that
   load or store is different from MODE, processing is halted on this
   alias set.  For the vast majority of alias sets, all of the loads
   and stores will use the same mode.  But vectors are treated
   differently: the alias set is established for the entire vector,
   but reload will insert loads and stores for individual elements and
   we do not necessarily have the information to track those separate
   elements.  So when we see a mode mismatch, we just bail.  */

void
dse_record_singleton_alias_set (alias_set_type alias_set,
                                enum machine_mode mode)
{
  struct clear_alias_mode_holder tmp_holder;
  struct clear_alias_mode_holder *entry;
  void **slot;

  /* If we are not going to run dse, we need to return now or there
     will be problems with allocating the bitmaps.  */
  if ((!gate_dse()) || !alias_set)
    return;

  if (!clear_alias_sets)
    {
      clear_alias_sets = BITMAP_ALLOC (NULL);
      disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
      clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
                                            clear_alias_mode_eq, NULL);
      clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
                                                 sizeof (struct clear_alias_mode_holder), 100);
    }

  bitmap_set_bit (clear_alias_sets, alias_set);

  tmp_holder.alias_set = alias_set;

  slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, INSERT);
  gcc_assert (*slot == NULL);

  *slot = entry = pool_alloc (clear_alias_mode_pool);
  entry->alias_set = alias_set;
  entry->mode = mode;
}

/* Remove ALIAS_SET from the sets of stack slots being considered.  */

void
dse_invalidate_singleton_alias_set (alias_set_type alias_set)
{
  if ((!gate_dse()) || !alias_set)
    return;

  bitmap_clear_bit (clear_alias_sets, alias_set);
}

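/* A hypothetical usage sketch (the real callers live in the spill-slot
   assignment code and may look different): after creating a stack slot
   MEM that is known to be completely unaliased, the caller would do

     dse_record_singleton_alias_set (MEM_ALIAS_SET (mem), GET_MODE (mem));

   and, if the slot is later shared so that the guarantee no longer
   holds,

     dse_invalidate_singleton_alias_set (MEM_ALIAS_SET (mem));

   to withdraw it from this special treatment.  */
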
/* Look up the bitmap index for OFFSET in GROUP_INFO.  If it is not
   there, return 0.  */

static int
get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
{
  if (offset < 0)
    {
      HOST_WIDE_INT offset_p = -offset;
      if (offset_p >= group_info->offset_map_size_n)
        return 0;
      return group_info->offset_map_n[offset_p];
    }
  else
    {
      if (offset >= group_info->offset_map_size_p)
        return 0;
      return group_info->offset_map_p[offset];
    }
}

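/* For example (illustrative numbers only): if step 2 assigned
   offset_map_p[3] = 7 for some group, then get_bitmap_index (group, 3)
   returns 7; a negative offset such as -2 is looked up as
   offset_map_n[2]; and any offset that never received a position maps
   to 0, which the callers treat as "not tracked globally".  */
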
/* Process the STORE_INFOs into the bitmaps into GEN and KILL.  KILL
   may be NULL.  */

static void
scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
{
  while (store_info)
    {
      HOST_WIDE_INT i;
      group_info_t group_info
        = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
      if (group_info->process_globally)
        for (i = store_info->begin; i < store_info->end; i++)
          {
            int index = get_bitmap_index (group_info, i);
            if (index != 0)
              {
                bitmap_set_bit (gen, index);
                if (kill)
                  bitmap_clear_bit (kill, index);
              }
          }
      store_info = store_info->next;
    }
}

/* Process the STORE_INFOs into the bitmaps into GEN and KILL.  KILL
   may be NULL.  */

static void
scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
{
  while (store_info)
    {
      if (store_info->alias_set)
        {
          int index = get_bitmap_index (clear_alias_group,
                                        store_info->alias_set);
          if (index != 0)
            {
              bitmap_set_bit (gen, index);
              if (kill)
                bitmap_clear_bit (kill, index);
            }
        }
      store_info = store_info->next;
    }
}

/* Process the READ_INFOs into the bitmaps into GEN and KILL.  KILL
   may be NULL.  */

static void
scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
{
  read_info_t read_info = insn_info->read_rec;
  int i;
  group_info_t group;

  while (read_info)
    {
      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
        {
          if (group->process_globally)
            {
              if (i == read_info->group_id)
                {
                  if (read_info->begin > read_info->end)
                    {
                      /* Begin > end for block mode reads.  */
                      if (kill)
                        bitmap_ior_into (kill, group->group_kill);
                      bitmap_and_compl_into (gen, group->group_kill);
                    }
                  else
                    {
                      /* The groups are the same, just process the
                         offsets.  */
                      HOST_WIDE_INT j;
                      for (j = read_info->begin; j < read_info->end; j++)
                        {
                          int index = get_bitmap_index (group, j);
                          if (index != 0)
                            {
                              if (kill)
                                bitmap_set_bit (kill, index);
                              bitmap_clear_bit (gen, index);
                            }
                        }
                    }
                }
              else
                {
                  /* The groups are different, if the alias sets
                     conflict, clear the entire group.  We only need
                     to apply this test if the read_info is a cselib
                     read.  Anything with a constant base cannot alias
                     something else with a different constant
                     base.  */
                  if ((read_info->group_id < 0)
                      && canon_true_dependence (group->base_mem,
                                                QImode,
                                                group->canon_base_mem,
                                                read_info->mem, rtx_varies_p))
                    {
                      if (kill)
                        bitmap_ior_into (kill, group->group_kill);
                      bitmap_and_compl_into (gen, group->group_kill);
                    }
                }
            }
        }

      read_info = read_info->next;
    }
}

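/* To summarize the effect of a read on the global sets: a read of
   positions [begin, end) in the same group removes those positions
   from GEN and adds them to KILL; a block mode read (begin > end) or a
   cselib-based read that may alias the group does the same for the
   group's entire group_kill set; reads that provably cannot touch the
   group leave the sets alone.  */
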
/* Process the READ_INFOs into the bitmaps into GEN and KILL.  KILL
   may be NULL.  */

static void
scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
{
  while (read_info)
    {
      if (read_info->alias_set)
        {
          int index = get_bitmap_index (clear_alias_group,
                                        read_info->alias_set);
          if (index != 0)
            {
              if (kill)
                bitmap_set_bit (kill, index);
              bitmap_clear_bit (gen, index);
            }
        }
      read_info = read_info->next;
    }
}

/* Return the insn in BB_INFO before the first wild read or if there
   are no wild reads in the block, return the last insn.  */

static insn_info_t
find_insn_before_first_wild_read (bb_info_t bb_info)
{
  insn_info_t insn_info = bb_info->last_insn;
  insn_info_t last_wild_read = NULL;

  while (insn_info)
    {
      if (insn_info->wild_read)
        {
          last_wild_read = insn_info->prev_insn;
          /* Block starts with wild read.  */
          if (!last_wild_read)
            return NULL;
        }

      insn_info = insn_info->prev_insn;
    }

  if (last_wild_read)
    return last_wild_read;
  else
    return bb_info->last_insn;
}

/* Scan the insns in BB_INFO starting at PTR and going to the top of
   the block in order to build the gen and kill sets for the block.
   We start at ptr which may be the last insn in the block or may be
   the first insn with a wild read.  In the latter case we are able to
   skip the rest of the block because it just does not matter:
   anything that happens is hidden by the wild read.  */

static void
dse_step3_scan (bool for_spills, basic_block bb)
{
  bb_info_t bb_info = bb_table[bb->index];
  insn_info_t insn_info;

  if (for_spills)
    /* There are no wild reads in the spill case.  */
    insn_info = bb_info->last_insn;
  else
    insn_info = find_insn_before_first_wild_read (bb_info);

  /* In the spill case or in the no_spill case if there is no wild
     read in the block, we will need a kill set.  */
  if (insn_info == bb_info->last_insn)
    {
      if (bb_info->kill)
        bitmap_clear (bb_info->kill);
      else
        bb_info->kill = BITMAP_ALLOC (NULL);
    }
  else if (bb_info->kill)
    BITMAP_FREE (bb_info->kill);

  while (insn_info)
    {
      /* There may have been code deleted by the dce pass run before
         this phase.  */
      if (insn_info->insn && INSN_P (insn_info->insn))
        {
          /* Process the read(s) last.  */
          if (for_spills)
            {
              scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
              scan_reads_spill (insn_info->read_rec, bb_info->gen, bb_info->kill);
            }
          else
            {
              scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
              scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
            }
        }

      insn_info = insn_info->prev_insn;
    }
}

/* Set the gen set of the exit block, and also any block with no
   successors that does not have a wild read.  */

static void
dse_step3_exit_block_scan (bb_info_t bb_info)
{
  /* The gen set is all 0's for the exit block except for the
     frame_pointer_group.  */

  if (stores_off_frame_dead_at_return)
    {
      unsigned int i;
      group_info_t group;

      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
        {
          if (group->process_globally && group->frame_related)
            bitmap_ior_into (bb_info->gen, group->group_kill);
        }
    }
}

/* Find all of the blocks that are not backwards reachable from the
   exit block or any block with no successors (BB).  These are the
   infinite loops or infinite self loops.  These blocks will still
   have their bits set in UNREACHABLE_BLOCKS.  */

static void
mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
{
  edge e;
  edge_iterator ei;

  if (TEST_BIT (unreachable_blocks, bb->index))
    {
      RESET_BIT (unreachable_blocks, bb->index);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          mark_reachable_blocks (unreachable_blocks, e->src);
        }
    }
}

/* Build the transfer functions for the function.  */

static void
dse_step3 (bool for_spills)
{
  basic_block bb;
  sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
  sbitmap_iterator sbi;
  bitmap all_ones = NULL;
  unsigned int i;

  sbitmap_ones (unreachable_blocks);

  FOR_ALL_BB (bb)
    {
      bb_info_t bb_info = bb_table[bb->index];
      if (bb_info->gen)
        bitmap_clear (bb_info->gen);
      else
        bb_info->gen = BITMAP_ALLOC (NULL);

      if (bb->index == ENTRY_BLOCK)
        ;
      else if (bb->index == EXIT_BLOCK)
        dse_step3_exit_block_scan (bb_info);
      else
        dse_step3_scan (for_spills, bb);
      if (EDGE_COUNT (bb->succs) == 0)
        mark_reachable_blocks (unreachable_blocks, bb);

      /* If this is the second time dataflow is run, delete the old
         sets.  */
      if (bb_info->in)
        BITMAP_FREE (bb_info->in);
      if (bb_info->out)
        BITMAP_FREE (bb_info->out);
    }

  /* For any block in an infinite loop, we must initialize the out set
     to all ones.  This could be expensive, but almost never occurs in
     practice.  However, it is common in regression tests.  */
  EXECUTE_IF_SET_IN_SBITMAP (unreachable_blocks, 0, i, sbi)
    {
      if (bitmap_bit_p (all_blocks, i))
        {
          bb_info_t bb_info = bb_table[i];
          if (!all_ones)
            {
              unsigned int j;
              group_info_t group;

              all_ones = BITMAP_ALLOC (NULL);
              for (j = 0; VEC_iterate (group_info_t, rtx_group_vec, j, group); j++)
                bitmap_ior_into (all_ones, group->group_kill);
            }
          if (!bb_info->out)
            {
              bb_info->out = BITMAP_ALLOC (NULL);
              bitmap_copy (bb_info->out, all_ones);
            }
        }
    }

  if (all_ones)
    BITMAP_FREE (all_ones);
  sbitmap_free (unreachable_blocks);
}

/*----------------------------------------------------------------------------
   Fourth step.

   Solve the bitvector equations.
----------------------------------------------------------------------------*/


/* Confluence function for blocks with no successors.  Create an out
   set from the gen set of the exit block.  This block logically has
   the exit block as a successor.  */

static void
dse_confluence_0 (basic_block bb)
{
  bb_info_t bb_info = bb_table[bb->index];

  if (bb->index == EXIT_BLOCK)
    return;

  if (!bb_info->out)
    {
      bb_info->out = BITMAP_ALLOC (NULL);
      bitmap_copy (bb_info->out, bb_table[EXIT_BLOCK]->gen);
    }
}

/* Propagate the information from the in set of the dest of E to the
   out set of the src of E.  If the various in or out sets are not
   there, that means they are all ones.  */

static void
dse_confluence_n (edge e)
{
  bb_info_t src_info = bb_table[e->src->index];
  bb_info_t dest_info = bb_table[e->dest->index];

  if (dest_info->in)
    {
      if (src_info->out)
        bitmap_and_into (src_info->out, dest_info->in);
      else
        {
          src_info->out = BITMAP_ALLOC (NULL);
          bitmap_copy (src_info->out, dest_info->in);
        }
    }
}

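/* Taken together, the two confluence functions implement an "all
   paths" (AND) meet for this backward problem: a bit survives in a
   block's out set only if every successor (or the exit block, for
   blocks with no successors) still has it in its in set.  A missing in
   or out set stands for the universal set, so intersecting with it
   would change nothing and is simply skipped.  */
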
/* Propagate the info from the out to the in set of BB_INDEX's basic
   block.  There are three cases:

   1) The block has no kill set.  In this case the kill set is all
   ones.  It does not matter what the out set of the block is, none of
   the info can reach the top.  The only thing that reaches the top is
   the gen set and we just copy the set.

   2) There is a kill set but no out set and bb has successors.  In
   this case we just return.  Eventually an out set will be created and
   it is better to wait than to create a set of ones.

   3) There is both a kill and out set.  We apply the obvious transfer
   function.  */

static bool
dse_transfer_function (int bb_index)
{
  bb_info_t bb_info = bb_table[bb_index];

  if (bb_info->kill)
    {
      if (bb_info->out)
        {
          /* Case 3 above.  */
          if (bb_info->in)
            return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
                                         bb_info->out, bb_info->kill);
          else
            {
              bb_info->in = BITMAP_ALLOC (NULL);
              bitmap_ior_and_compl (bb_info->in, bb_info->gen,
                                    bb_info->out, bb_info->kill);
              return true;
            }
        }
      else
        /* Case 2 above.  */
        return false;
    }
  else
    {
      /* Case 1 above.  If there is already an in set, nothing
         happens.  */
      if (bb_info->in)
        return false;
      else
        {
          bb_info->in = BITMAP_ALLOC (NULL);
          bitmap_copy (bb_info->in, bb_info->gen);
          return true;
        }
    }
}

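/* The transfer function above is the usual backward one,

     in = gen | (out & ~kill),

   which is what bitmap_ior_and_compl computes directly; the boolean
   result tells the dataflow engine whether the in set changed and
   therefore whether the predecessors need to be revisited.  */
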
/* Solve the dataflow equations.  */

static void
dse_step4 (void)
{
  df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
                      dse_confluence_n, dse_transfer_function,
                      all_blocks, df_get_postorder (DF_BACKWARD),
                      df_get_n_blocks (DF_BACKWARD));
  if (dump_file)
    {
      basic_block bb;

      fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
      FOR_ALL_BB (bb)
        {
          bb_info_t bb_info = bb_table[bb->index];

          df_print_bb_index (bb, dump_file);
          if (bb_info->in)
            bitmap_print (dump_file, bb_info->in, "  in:   ", "\n");
          else
            fprintf (dump_file, "  in:   *MISSING*\n");
          if (bb_info->gen)
            bitmap_print (dump_file, bb_info->gen, "  gen:  ", "\n");
          else
            fprintf (dump_file, "  gen:  *MISSING*\n");
          if (bb_info->kill)
            bitmap_print (dump_file, bb_info->kill, "  kill: ", "\n");
          else
            fprintf (dump_file, "  kill: *MISSING*\n");
          if (bb_info->out)
            bitmap_print (dump_file, bb_info->out, "  out:  ", "\n");
          else
            fprintf (dump_file, "  out:  *MISSING*\n\n");
        }
    }
}

/*----------------------------------------------------------------------------
   Fifth step.

   Delete the stores that can only be deleted using the global information.
----------------------------------------------------------------------------*/


static void
dse_step5_nospill (void)
{
  basic_block bb;
  FOR_EACH_BB (bb)
    {
      bb_info_t bb_info = bb_table[bb->index];
      insn_info_t insn_info = bb_info->last_insn;
      bitmap v = bb_info->out;

      while (insn_info)
        {
          bool deleted = false;
          if (dump_file && insn_info->insn)
            {
              fprintf (dump_file, "starting to process insn %d\n",
                       INSN_UID (insn_info->insn));
              bitmap_print (dump_file, v, "  v:  ", "\n");
            }

          /* There may have been code deleted by the dce pass run before
             this phase.  */
          if (insn_info->insn
              && INSN_P (insn_info->insn)
              && (!insn_info->cannot_delete)
              && (!bitmap_empty_p (v)))
            {
              store_info_t store_info = insn_info->store_rec;

              /* Try to delete the current insn.  */
              deleted = true;

              /* Skip the clobbers.  */
              while (!store_info->is_set)
                store_info = store_info->next;

              if (store_info->alias_set)
                deleted = false;
              else
                {
                  HOST_WIDE_INT i;
                  group_info_t group_info
                    = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);

                  for (i = store_info->begin; i < store_info->end; i++)
                    {
                      int index = get_bitmap_index (group_info, i);

                      if (dump_file)
                        fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
                      if (index == 0 || !bitmap_bit_p (v, index))
                        {
                          if (dump_file)
                            fprintf (dump_file, "failing at i = %d\n", (int)i);
                          deleted = false;
                          break;
                        }
                    }
                }
              if (deleted)
                {
                  if (dbg_cnt (dse))
                    {
                      check_for_inc_dec (insn_info->insn);
                      delete_insn (insn_info->insn);
                      insn_info->insn = NULL;
                      globally_deleted++;
                    }
                }
            }

          /* We do not want to process the local info if the insn was
             deleted.  For instance, if the insn did a wild read, we
             no longer need to trash the info.  */
          if (insn_info->insn
              && INSN_P (insn_info->insn)
              && (!deleted))
            {
              scan_stores_nospill (insn_info->store_rec, v, NULL);
              if (insn_info->wild_read)
                {
                  if (dump_file)
                    fprintf (dump_file, "wild read\n");
                  bitmap_clear (v);
                }
              else if (insn_info->read_rec)
                {
                  if (dump_file)
                    fprintf (dump_file, "regular read\n");
                  scan_reads_nospill (insn_info, v, NULL);
                }
            }

          insn_info = insn_info->prev_insn;
        }
    }
}

static void
dse_step5_spill (void)
{
  basic_block bb;
  FOR_EACH_BB (bb)
    {
      bb_info_t bb_info = bb_table[bb->index];
      insn_info_t insn_info = bb_info->last_insn;
      bitmap v = bb_info->out;

      while (insn_info)
        {
          bool deleted = false;
          /* There may have been code deleted by the dce pass run before
             this phase.  */
          if (insn_info->insn
              && INSN_P (insn_info->insn)
              && (!insn_info->cannot_delete)
              && (!bitmap_empty_p (v)))
            {
              /* Try to delete the current insn.  */
              store_info_t store_info = insn_info->store_rec;
              deleted = true;

              while (store_info)
                {
                  if (store_info->alias_set)
                    {
                      int index = get_bitmap_index (clear_alias_group,
                                                    store_info->alias_set);
                      if (index == 0 || !bitmap_bit_p (v, index))
                        {
                          deleted = false;
                          break;
                        }
                    }
                  else
                    deleted = false;
                  store_info = store_info->next;
                }
              if (deleted && dbg_cnt (dse))
                {
                  if (dump_file)
                    fprintf (dump_file, "Spill deleting insn %d\n",
                             INSN_UID (insn_info->insn));
                  check_for_inc_dec (insn_info->insn);
                  delete_insn (insn_info->insn);
                  spill_deleted++;
                  insn_info->insn = NULL;
                }
            }

          if (insn_info->insn
              && INSN_P (insn_info->insn)
              && (!deleted))
            {
              scan_stores_spill (insn_info->store_rec, v, NULL);
              scan_reads_spill (insn_info->read_rec, v, NULL);
            }

          insn_info = insn_info->prev_insn;
        }
    }
}

/*----------------------------------------------------------------------------
   Sixth step.

   Destroy everything left standing.
----------------------------------------------------------------------------*/

static void
dse_step6 (bool global_done)
{
  unsigned int i;
  group_info_t group;
  basic_block bb;

  if (global_done)
    {
      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
        {
          free (group->offset_map_n);
          free (group->offset_map_p);
          BITMAP_FREE (group->store1_n);
          BITMAP_FREE (group->store1_p);
          BITMAP_FREE (group->store2_n);
          BITMAP_FREE (group->store2_p);
          BITMAP_FREE (group->group_kill);
        }

      FOR_ALL_BB (bb)
        {
          bb_info_t bb_info = bb_table[bb->index];
          BITMAP_FREE (bb_info->gen);
          if (bb_info->kill)
            BITMAP_FREE (bb_info->kill);
          if (bb_info->in)
            BITMAP_FREE (bb_info->in);
          if (bb_info->out)
            BITMAP_FREE (bb_info->out);
        }
    }
  else
    {
      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
        {
          BITMAP_FREE (group->store1_n);
          BITMAP_FREE (group->store1_p);
          BITMAP_FREE (group->store2_n);
          BITMAP_FREE (group->store2_p);
          BITMAP_FREE (group->group_kill);
        }
    }

  if (clear_alias_sets)
    {
      BITMAP_FREE (clear_alias_sets);
      BITMAP_FREE (disqualified_clear_alias_sets);
      free_alloc_pool (clear_alias_mode_pool);
      htab_delete (clear_alias_mode_table);
    }

  end_alias_analysis ();
  free (bb_table);
  htab_delete (rtx_group_table);
  VEC_free (group_info_t, heap, rtx_group_vec);
  BITMAP_FREE (all_blocks);
  BITMAP_FREE (scratch);

  free_alloc_pool (rtx_store_info_pool);
  free_alloc_pool (read_info_pool);
  free_alloc_pool (insn_info_pool);
  free_alloc_pool (bb_info_pool);
  free_alloc_pool (rtx_group_info_pool);
  free_alloc_pool (deferred_change_pool);
}

/* -------------------------------------------------------------------------
   DSE
   ------------------------------------------------------------------------- */

/* Callback for running pass_rtl_dse.  */

static unsigned int
rest_of_handle_dse (void)
{
  bool did_global = false;

  df_set_flags (DF_DEFER_INSN_RESCAN);

  dse_step0 ();
  dse_step1 ();
  dse_step2_init ();
  if (dse_step2_nospill ())
    {
      df_set_flags (DF_LR_RUN_DCE);
      df_analyze ();
      did_global = true;
      if (dump_file)
        fprintf (dump_file, "doing global processing\n");
      dse_step3 (false);
      dse_step4 ();
      dse_step5_nospill ();
    }

  /* For the instance of dse that runs after reload, we make a special
     pass to process the spills.  These are special in that they are
     totally transparent, i.e., there are no aliasing issues that need
     to be considered.  This means that the wild reads that kill
     everything else do not apply here.  */
  if (clear_alias_sets && dse_step2_spill ())
    {
      if (!did_global)
        {
          df_set_flags (DF_LR_RUN_DCE);
          df_analyze ();
        }
      did_global = true;
      if (dump_file)
        fprintf (dump_file, "doing global spill processing\n");
      dse_step3 (true);
      dse_step4 ();
      dse_step5_spill ();
    }

  dse_step6 (did_global);

  if (dump_file)
    fprintf (dump_file, "dse: local deletions = %d, global deletions = %d, spill deletions = %d\n",
             locally_deleted, globally_deleted, spill_deleted);
  return 0;
}

static bool
gate_dse (void)
{
  return optimize > 0 && flag_dse;
}

struct tree_opt_pass pass_rtl_dse1 =
{
  "dse1",                               /* name */
  gate_dse,                             /* gate */
  rest_of_handle_dse,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_DSE1,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_ggc_collect,                     /* todo_flags_finish */
};

struct tree_opt_pass pass_rtl_dse2 =
{
  "dse2",                               /* name */
  gate_dse,                             /* gate */
  rest_of_handle_dse,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_DSE2,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_ggc_collect,                     /* todo_flags_finish */
};