1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of the store merging pass is to combine multiple memory
22 stores of constant values, values loaded from memory or bitwise operations
23 on those to consecutive memory locations into fewer wider stores.
24 For example, if we have a sequence performing four byte stores to
25 consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
30 we can transform this into a single 4-byte store if the target supports it:
31 [p] := imm1:imm2:imm3:imm4 //concatenated immediates according to endianness.
   The following pattern:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
38 if there is no overlap can be transformed into a single 4-byte
39 load followed by a single 4-byte store.
   Or:
   [p     ] := [q     ] ^ imm1;
43 [p + 1B] := [q + 1B] ^ imm2;
44 [p + 2B] := [q + 2B] ^ imm3;
45 [p + 3B] := [q + 3B] ^ imm4;
46 if there is no overlap can be transformed into a single 4-byte
47 load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
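
   As a concrete C-level illustration (a sketch only; the struct and function
   names here are invented for the example), a sequence like:

     struct S { unsigned char a, b, c, d; };
     void
     f (struct S *p)
     {
       p->a = 1;
       p->b = 2;
       p->c = 3;
       p->d = 4;
     }

   performs four adjacent byte stores that are candidates for being merged
   into one 4-byte store of the combined constant, when the target allows it.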
49 The algorithm is applied to each basic block in three phases:
51 1) Scan through the basic block recording assignments to
52 destinations that can be expressed as a store to memory of a certain size
53 at a certain bit offset from expressions we can handle. For bit-fields
54 we also note the surrounding bit region, bits that could be stored in
55 a read-modify-write operation when storing the bit-field. Record store
56 chains to different bases in a hash_map (m_stores) and make sure to
57 terminate such chains when appropriate (for example when the stored
58 values get used subsequently).
59 These stores can be a result of structure element initializers, array stores
60 etc. A store_immediate_info object is recorded for every such store.
61 Record as many such assignments to a single base as possible until a
62 statement that interferes with the store sequence is encountered.
63 Each store has up to 2 operands, which can be an immediate constant
64 or a memory load, from which the value to be stored can be computed.
65 At most one of the operands can be a constant. The operands are recorded
66 in store_operand_info struct.
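
   As an illustration of the recorded operands (a hypothetical GIMPLE-like
   sketch, SSA names invented), a sequence such as:

     _1 = [q + 1B];
     _2 = _1 ^ 0x12;
     [p + 1B] := _2;

   would be recorded as a store with rhs_code BIT_XOR_EXPR, one operand
   describing the load from [q + 1B] and the other holding the constant 0x12.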
68 2) Analyze the chain of stores recorded in phase 1) (i.e. the vector of
69 store_immediate_info objects) and coalesce contiguous stores into
70 merged_store_group objects.  For bit-field stores, we don't need to
71 require the stores themselves to be contiguous, just their surrounding bit
72 regions have to be contiguous.  If the expression being stored is different
73 between adjacent stores, such as one store storing a constant and the
74 following one storing a value loaded from memory, or if the loaded memory
75 objects are not adjacent, a new merged_store_group is created as well.
77 For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
84 This phase would produce two merged_store_group objects, one recording the
85 two bytes stored in the memory region [p : p + 1] and another
86 recording the four bytes stored in the memory region [p + 3 : p + 6].
88 3) The merged_store_group objects produced in phase 2) are processed
89 to generate the sequence of wider stores that set the contiguous memory
90 regions to the sequence of bytes that correspond to it. This may emit
91 multiple stores per store group to handle contiguous stores that are not
92 of a size that is a power of 2. For example it can try to emit a 40-bit
93 store as a 32-bit store followed by an 8-bit store.
94 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT or
95 TARGET_SLOW_UNALIGNED_ACCESS rules.
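
   For instance (an illustrative sketch of the splitting), a merged group of
   40 bits starting at [p] could be emitted as:

     [p     ] (32-bit) := <32 bits of the merged value>;
     [p + 4B] (8-bit)  := <the remaining 8 bits>;

   with which bits of the merged value go where decided by endianness, as the
   note below shows.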
97 Note on endianness and example:
98 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;
104 The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|
114 To merge these into a single 48-bit merged value 'val' in phase 2)
115 on little-endian we insert stores to higher (consecutive) bitpositions
116 into the most significant bits of the merged value.
117 The final merged value would be: 0xcdab56781234
119 For big-endian we insert stores to higher bitpositions into the least
120 significant bits of the merged value.
121 The final merged value would be: 0x12345678abcd
123 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
124 followed by a 16-bit store. Again, we must consider endianness when
125 breaking down the 48-bit value 'val' computed above.
126 For little endian we emit:
127 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
128 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
130 Whereas for big-endian we emit:
131 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
132 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
136 #include "coretypes.h"
140 #include "builtins.h"
141 #include "fold-const.h"
142 #include "tree-pass.h"
144 #include "gimple-pretty-print.h"
146 #include "fold-const.h"
148 #include "print-tree.h"
149 #include "tree-hash-traits.h"
150 #include "gimple-iterator.h"
151 #include "gimplify.h"
152 #include "stor-layout.h"
154 #include "tree-cfg.h"
157 #include "gimplify-me.h"
159 #include "expr.h" /* For get_bit_range. */
160 #include "optabs-tree.h"
161 #include "selftest.h"
163 /* The maximum size (in bits) of the stores this pass should generate. */
164 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
165 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
167 /* Limit to bound the number of aliasing checks for loads with the same
168 vuse as the corresponding store. */
169 #define MAX_STORE_ALIAS_CHECKS 64
static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;
185 /* A symbolic number structure is used to detect byte permutation and selection
186 patterns of a source. To achieve that, its field N contains an artificial
187 number consisting of BITS_PER_MARKER sized markers tracking where each
188 byte comes from in the source:
190 0 - target byte has the value 0
191 FF - target byte has an unknown value (e.g. due to sign extension)
192 1..size - marker value is the byte index in the source (0 for lsb).
194 To detect permutations on memory sources (arrays and structures), a symbolic
195 number is also associated:
196 - a base address BASE_ADDR and an OFFSET giving the address of the source;
197 - a range which gives the difference between the highest and lowest accessed
198 memory location to make such a symbolic number;
199 - the address SRC of the source element of lowest address as a convenience
200 to easily get BASE_ADDR + offset + lowest bytepos;
201 - number of expressions N_OPS bitwise ored together to represent
202 approximate cost of the computation.
204 Note 1: the range is different from size as size reflects the size of the
205 type of the current expression. For instance, for an array char a[],
206 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
207 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
time a range of 1.
210 Note 2: for non-memory sources, range holds the same value as size.
212 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
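/* As an illustration of the marker encoding (not part of the original
   comment): a freshly initialized symbolic number for a 4-byte value has
   N == 0x04030201 with the default BITS_PER_MARKER of 8: marker 1, the
   lowest-order source byte, sits in the least significant marker slot and
   marker 4 in the most significant one.  If the tracked expression
   byte-swaps the value, the markers end up as 0x01020304 instead; see
   CMPNOP and CMPXCHG below.  */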
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};
227 #define BITS_PER_MARKER 8
228 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
229 #define MARKER_BYTE_UNKNOWN MARKER_MASK
230 #define HEAD_MARKER(n, size) \
231 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
233 /* The number which the find_bswap_or_nop_1 result should match in
234 order to have a nop. The number is masked according to the size of
235 the symbolic number before using it. */
236 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
237 (uint64_t)0x08070605 << 32 | 0x04030201)
239 /* The number which the find_bswap_or_nop_1 result should match in
240 order to have a byte swap. The number is masked according to the
241 size of the symbolic number before using it. */
242 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
243 (uint64_t)0x01020304 << 32 | 0x05060708)
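/* For illustration (a sketch, not taken from the surrounding code), a
   hand-written byte swap that this machinery is meant to recognize:

     uint32_t
     my_bswap32 (uint32_t x)
     {
       return ((x & 0x000000ff) << 24)
	      | ((x & 0x0000ff00) << 8)
	      | ((x & 0x00ff0000) >> 8)
	      | ((x & 0xff000000) >> 24);
     }

   Tracking the masks, shifts and ORs above turns the initial marker value
   0x04030201 into 0x01020304, which equals CMPXCHG narrowed to 4 bytes, so
   the whole computation can be replaced by __builtin_bswap32 when the target
   supports it.  */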
245 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
246 number N. Return false if the requested operation is not permitted
247 on a symbolic number. */
static bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
295 /* Perform sanity checking for the symbolic number N and the gimple
statement STMT.  */
299 verify_symbolic_number_p (struct symbolic_number
*n
, gimple
*stmt
)
303 lhs_type
= gimple_expr_type (stmt
);
305 if (TREE_CODE (lhs_type
) != INTEGER_TYPE
)
308 if (TYPE_PRECISION (lhs_type
) != TYPE_PRECISION (n
->type
))
314 /* Initialize the symbolic number N for the bswap pass from the base element
315 SRC manipulated by the bitwise OR expression. */
318 init_symbolic_number (struct symbolic_number
*n
, tree src
)
322 if (! INTEGRAL_TYPE_P (TREE_TYPE (src
)))
325 n
->base_addr
= n
->offset
= n
->alias_set
= n
->vuse
= NULL_TREE
;
328 /* Set up the symbolic number N by setting each byte to a value between 1 and
329 the byte size of rhs1. The highest order byte is set to n->size and the
330 lowest order byte to 1. */
331 n
->type
= TREE_TYPE (src
);
332 size
= TYPE_PRECISION (n
->type
);
333 if (size
% BITS_PER_UNIT
!= 0)
335 size
/= BITS_PER_UNIT
;
336 if (size
> 64 / BITS_PER_MARKER
)
342 if (size
< 64 / BITS_PER_MARKER
)
343 n
->n
&= ((uint64_t) 1 << (size
* BITS_PER_MARKER
)) - 1;
348 /* Check if STMT might be a byte swap or a nop from a memory source and return
349 the answer. If so, REF is that memory source and the base of the memory area
350 accessed and the offset of the access from that base are recorded in N. */
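/* For example (an illustrative sketch, names invented): for a load such as
   c = buf[3], where BUF is an array of unsigned char, the recorded symbolic
   number would have BASE_ADDR pointing to BUF, BYTEPOS equal to 3, a range
   of one byte, and the statement's vuse and alias set copied into N.  */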
353 find_bswap_or_nop_load (gimple
*stmt
, tree ref
, struct symbolic_number
*n
)
355 /* Leaf node is an array or component ref. Memorize its base and
356 offset from base to compare to other such leaf node. */
357 HOST_WIDE_INT bitsize
, bitpos
;
359 int unsignedp
, reversep
, volatilep
;
360 tree offset
, base_addr
;
362 /* Not prepared to handle PDP endian. */
363 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
366 if (!gimple_assign_load_p (stmt
) || gimple_has_volatile_ops (stmt
))
369 base_addr
= get_inner_reference (ref
, &bitsize
, &bitpos
, &offset
, &mode
,
370 &unsignedp
, &reversep
, &volatilep
);
372 if (TREE_CODE (base_addr
) == MEM_REF
)
374 offset_int bit_offset
= 0;
375 tree off
= TREE_OPERAND (base_addr
, 1);
377 if (!integer_zerop (off
))
379 offset_int boff
, coff
= mem_ref_offset (base_addr
);
380 boff
= coff
<< LOG2_BITS_PER_UNIT
;
384 base_addr
= TREE_OPERAND (base_addr
, 0);
386 /* Avoid returning a negative bitpos as this may wreak havoc later. */
387 if (wi::neg_p (bit_offset
))
389 offset_int mask
= wi::mask
<offset_int
> (LOG2_BITS_PER_UNIT
, false);
390 offset_int tem
= wi::bit_and_not (bit_offset
, mask
);
391 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
392 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
394 tem
>>= LOG2_BITS_PER_UNIT
;
396 offset
= size_binop (PLUS_EXPR
, offset
,
397 wide_int_to_tree (sizetype
, tem
));
399 offset
= wide_int_to_tree (sizetype
, tem
);
402 bitpos
+= bit_offset
.to_shwi ();
405 if (bitpos
% BITS_PER_UNIT
)
407 if (bitsize
% BITS_PER_UNIT
)
412 if (!init_symbolic_number (n
, ref
))
414 n
->base_addr
= base_addr
;
416 n
->bytepos
= bitpos
/ BITS_PER_UNIT
;
417 n
->alias_set
= reference_alias_ptr_type (ref
);
418 n
->vuse
= gimple_vuse (stmt
);
422 /* Compute the symbolic number N representing the result of a bitwise OR on 2
423 symbolic numbers N1 and N2 whose source statements are respectively
424 SOURCE_STMT1 and SOURCE_STMT2. */
427 perform_symbolic_merge (gimple
*source_stmt1
, struct symbolic_number
*n1
,
428 gimple
*source_stmt2
, struct symbolic_number
*n2
,
429 struct symbolic_number
*n
)
434 struct symbolic_number
*n_start
;
436 tree rhs1
= gimple_assign_rhs1 (source_stmt1
);
437 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
438 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
439 rhs1
= TREE_OPERAND (rhs1
, 0);
440 tree rhs2
= gimple_assign_rhs1 (source_stmt2
);
441 if (TREE_CODE (rhs2
) == BIT_FIELD_REF
442 && TREE_CODE (TREE_OPERAND (rhs2
, 0)) == SSA_NAME
)
443 rhs2
= TREE_OPERAND (rhs2
, 0);
445 /* Sources are different, cancel bswap if they are not memory locations with
446 the same base (array, structure, ...). */
450   HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
451   struct symbolic_number *toinc_n_ptr, *n_end;
452   basic_block bb1, bb2;
454 if (!n1
->base_addr
|| !n2
->base_addr
455 || !operand_equal_p (n1
->base_addr
, n2
->base_addr
, 0))
458 if (!n1
->offset
!= !n2
->offset
459 || (n1
->offset
&& !operand_equal_p (n1
->offset
, n2
->offset
, 0)))
462 if (n1
->bytepos
< n2
->bytepos
)
465 start_sub
= n2
->bytepos
- n1
->bytepos
;
470 start_sub
= n1
->bytepos
- n2
->bytepos
;
473 bb1
= gimple_bb (source_stmt1
);
474 bb2
= gimple_bb (source_stmt2
);
475 if (dominated_by_p (CDI_DOMINATORS
, bb1
, bb2
))
476 source_stmt
= source_stmt1
;
478 source_stmt
= source_stmt2
;
480 /* Find the highest address at which a load is performed and
481 compute related info. */
482 end1
= n1
->bytepos
+ (n1
->range
- 1);
483 end2
= n2
->bytepos
+ (n2
->range
- 1);
487 end_sub
= end2
- end1
;
492 end_sub
= end1
- end2
;
494 n_end
= (end2
> end1
) ? n2
: n1
;
496 /* Find symbolic number whose lsb is the most significant. */
497 if (BYTES_BIG_ENDIAN
)
498 toinc_n_ptr
= (n_end
== n1
) ? n2
: n1
;
500 toinc_n_ptr
= (n_start
== n1
) ? n2
: n1
;
502 n
->range
= end
- n_start
->bytepos
+ 1;
504 /* Check that the range of memory covered can be represented by
505 a symbolic number. */
506 if (n
->range
> 64 / BITS_PER_MARKER
)
509 /* Reinterpret byte marks in symbolic number holding the value of
510 bigger weight according to target endianness. */
511 inc
= BYTES_BIG_ENDIAN
? end_sub
: start_sub
;
512 size
= TYPE_PRECISION (n1
->type
) / BITS_PER_UNIT
;
513 for (i
= 0; i
< size
; i
++, inc
<<= BITS_PER_MARKER
)
516 = (toinc_n_ptr
->n
>> (i
* BITS_PER_MARKER
)) & MARKER_MASK
;
517 if (marker
&& marker
!= MARKER_BYTE_UNKNOWN
)
518 toinc_n_ptr
->n
+= inc
;
523 n
->range
= n1
->range
;
525 source_stmt
= source_stmt1
;
529 || alias_ptr_types_compatible_p (n1
->alias_set
, n2
->alias_set
))
530 n
->alias_set
= n1
->alias_set
;
532 n
->alias_set
= ptr_type_node
;
533 n
->vuse
= n_start
->vuse
;
534 n
->base_addr
= n_start
->base_addr
;
535 n
->offset
= n_start
->offset
;
536 n
->src
= n_start
->src
;
537 n
->bytepos
= n_start
->bytepos
;
538 n
->type
= n_start
->type
;
539 size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
541 for (i
= 0, mask
= MARKER_MASK
; i
< size
; i
++, mask
<<= BITS_PER_MARKER
)
543 uint64_t masked1
, masked2
;
545 masked1
= n1
->n
& mask
;
546 masked2
= n2
->n
& mask
;
547 if (masked1
&& masked2
&& masked1
!= masked2
)
550 n
->n
= n1
->n
| n2
->n
;
551 n
->n_ops
= n1
->n_ops
+ n2
->n_ops
;
556 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
557 the operation given by the rhs of STMT on the result. If the operation
558 could successfully be executed the function returns a gimple stmt whose
559 rhs's first tree is the expression of the source operand and NULL
otherwise.  */
563 find_bswap_or_nop_1 (gimple
*stmt
, struct symbolic_number
*n
, int limit
)
566 tree rhs1
, rhs2
= NULL
;
567 gimple
*rhs1_stmt
, *rhs2_stmt
, *source_stmt1
;
568 enum gimple_rhs_class rhs_class
;
570 if (!limit
|| !is_gimple_assign (stmt
))
573 rhs1
= gimple_assign_rhs1 (stmt
);
575 if (find_bswap_or_nop_load (stmt
, rhs1
, n
))
578 /* Handle BIT_FIELD_REF. */
579 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
580 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
582 unsigned HOST_WIDE_INT bitsize
= tree_to_uhwi (TREE_OPERAND (rhs1
, 1));
583 unsigned HOST_WIDE_INT bitpos
= tree_to_uhwi (TREE_OPERAND (rhs1
, 2));
584 if (bitpos
% BITS_PER_UNIT
== 0
585 && bitsize
% BITS_PER_UNIT
== 0
586 && init_symbolic_number (n
, TREE_OPERAND (rhs1
, 0)))
588 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
589 if (BYTES_BIG_ENDIAN
)
590 bitpos
= TYPE_PRECISION (n
->type
) - bitpos
- bitsize
;
593 if (!do_shift_rotate (RSHIFT_EXPR
, n
, bitpos
))
598 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
599 for (unsigned i
= 0; i
< bitsize
/ BITS_PER_UNIT
;
600 i
++, tmp
<<= BITS_PER_UNIT
)
601 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
605 n
->type
= TREE_TYPE (rhs1
);
607 n
->range
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
609 return verify_symbolic_number_p (n
, stmt
) ? stmt
: NULL
;
615 if (TREE_CODE (rhs1
) != SSA_NAME
)
618 code
= gimple_assign_rhs_code (stmt
);
619 rhs_class
= gimple_assign_rhs_class (stmt
);
620 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
622 if (rhs_class
== GIMPLE_BINARY_RHS
)
623 rhs2
= gimple_assign_rhs2 (stmt
);
625 /* Handle unary rhs and binary rhs with integer constants as second
628 if (rhs_class
== GIMPLE_UNARY_RHS
629 || (rhs_class
== GIMPLE_BINARY_RHS
630 && TREE_CODE (rhs2
) == INTEGER_CST
))
632 if (code
!= BIT_AND_EXPR
633 && code
!= LSHIFT_EXPR
634 && code
!= RSHIFT_EXPR
635 && code
!= LROTATE_EXPR
636 && code
!= RROTATE_EXPR
637 && !CONVERT_EXPR_CODE_P (code
))
640 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, n
, limit
- 1);
642 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
643 we have to initialize the symbolic number. */
646 if (gimple_assign_load_p (stmt
)
647 || !init_symbolic_number (n
, rhs1
))
656 int i
, size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
657 uint64_t val
= int_cst_value (rhs2
), mask
= 0;
658 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
660 /* Only constants masking full bytes are allowed. */
661 for (i
= 0; i
< size
; i
++, tmp
<<= BITS_PER_UNIT
)
662 if ((val
& tmp
) != 0 && (val
& tmp
) != tmp
)
665 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
674 if (!do_shift_rotate (code
, n
, (int) TREE_INT_CST_LOW (rhs2
)))
679 int i
, type_size
, old_type_size
;
682 type
= gimple_expr_type (stmt
);
683 type_size
= TYPE_PRECISION (type
);
684 if (type_size
% BITS_PER_UNIT
!= 0)
686 type_size
/= BITS_PER_UNIT
;
687 if (type_size
> 64 / BITS_PER_MARKER
)
690 /* Sign extension: result is dependent on the value. */
691 old_type_size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
692 if (!TYPE_UNSIGNED (n
->type
) && type_size
> old_type_size
693 && HEAD_MARKER (n
->n
, old_type_size
))
694 for (i
= 0; i
< type_size
- old_type_size
; i
++)
695 n
->n
|= (uint64_t) MARKER_BYTE_UNKNOWN
696 << ((type_size
- 1 - i
) * BITS_PER_MARKER
);
698 if (type_size
< 64 / BITS_PER_MARKER
)
700 /* If STMT casts to a smaller type mask out the bits not
701 belonging to the target type. */
702 n
->n
&= ((uint64_t) 1 << (type_size
* BITS_PER_MARKER
)) - 1;
706 n
->range
= type_size
;
712 return verify_symbolic_number_p (n
, stmt
) ? source_stmt1
: NULL
;
715 /* Handle binary rhs. */
717 if (rhs_class
== GIMPLE_BINARY_RHS
)
719 struct symbolic_number n1
, n2
;
720 gimple
*source_stmt
, *source_stmt2
;
722 if (code
!= BIT_IOR_EXPR
)
725 if (TREE_CODE (rhs2
) != SSA_NAME
)
728 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
733 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, &n1
, limit
- 1);
738 source_stmt2
= find_bswap_or_nop_1 (rhs2_stmt
, &n2
, limit
- 1);
743 if (TYPE_PRECISION (n1
.type
) != TYPE_PRECISION (n2
.type
))
746 if (!n1
.vuse
!= !n2
.vuse
747 || (n1
.vuse
&& !operand_equal_p (n1
.vuse
, n2
.vuse
, 0)))
751 = perform_symbolic_merge (source_stmt1
, &n1
, source_stmt2
, &n2
, n
);
756 if (!verify_symbolic_number_p (n
, stmt
))
768 /* Check if STMT completes a bswap implementation or a read in a given
769 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
770 accordingly. It also sets N to represent the kind of operations
771 performed: size of the resulting expression and whether it works on
772 a memory source, and if so alias-set and vuse. At last, the
773 function returns a stmt whose rhs's first tree is the source
expression.  */
777 find_bswap_or_nop (gimple
*stmt
, struct symbolic_number
*n
, bool *bswap
)
781 /* The number which the find_bswap_or_nop_1 result should match in order
782 to have a full byte swap. The number is shifted to the right
783 according to the size of the symbolic number before using it. */
784 uint64_t cmpxchg
= CMPXCHG
;
785 uint64_t cmpnop
= CMPNOP
;
790 /* The last parameter determines the depth search limit. It usually
791 correlates directly to the number n of bytes to be touched. We
792 increase that number by log2(n) + 1 here in order to also
793 cover signed -> unsigned conversions of the src operand as can be seen
794 in libgcc, and for initial shift/and operation of the src operand. */
795 limit
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt
)));
796 limit
+= 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT
) limit
);
797 ins_stmt
= find_bswap_or_nop_1 (stmt
, n
, limit
);
802 /* Find real size of result (highest non-zero byte). */
804 for (tmpn
= n
->n
, rsize
= 0; tmpn
; tmpn
>>= BITS_PER_MARKER
, rsize
++);
808 /* Zero out the bits corresponding to untouched bytes in original gimple
810 if (n
->range
< (int) sizeof (int64_t))
812 mask
= ((uint64_t) 1 << (n
->range
* BITS_PER_MARKER
)) - 1;
813 cmpxchg
>>= (64 / BITS_PER_MARKER
- n
->range
) * BITS_PER_MARKER
;
817 /* Zero out the bits corresponding to unused bytes in the result of the
818 gimple expression. */
819 if (rsize
< n
->range
)
821 if (BYTES_BIG_ENDIAN
)
823 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
825 cmpnop
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
829 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
830 cmpxchg
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
836 /* A complete byte swap should make the symbolic number start with
837 the largest digit in the highest order byte.  Unchanged symbolic
838 number indicates a read with the same endianness as the target architecture.  */
841 else if (n
->n
== cmpxchg
)
846 /* Useless bit manipulation performed by code. */
847 if (!n
->base_addr
&& n
->n
== cmpnop
&& n
->n_ops
== 1)
850 n
->range
*= BITS_PER_UNIT
;
854 const pass_data pass_data_optimize_bswap =
856   GIMPLE_PASS, /* type */
858   OPTGROUP_NONE, /* optinfo_flags */
860   PROP_ssa, /* properties_required */
861 0, /* properties_provided */
862 0, /* properties_destroyed */
863 0, /* todo_flags_start */
864 0, /* todo_flags_finish */
867 class pass_optimize_bswap
: public gimple_opt_pass
870 pass_optimize_bswap (gcc::context
*ctxt
)
871 : gimple_opt_pass (pass_data_optimize_bswap
, ctxt
)
874 /* opt_pass methods: */
875 virtual bool gate (function
*)
877 return flag_expensive_optimizations
&& optimize
&& BITS_PER_UNIT
== 8;
880 virtual unsigned int execute (function
*);
882 }; // class pass_optimize_bswap
884 /* Perform the bswap optimization: replace the expression computed in the rhs
885 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
886 Which of these alternatives replaces the rhs is given by N->base_addr (non
887 null if a load is needed) and BSWAP.  The type, VUSE and alias-set of the
888 load to perform are also given in N while the builtin bswap invocation is
889 given in FNDECL.  Finally, if a load is involved, SRC_STMT refers to one of
890 the load statements involved to construct the rhs in CUR_STMT and N->range
891 gives the size of the rhs expression for maintaining some statistics.
893 Note that if the replacement involves a load, CUR_STMT is moved just after
894 SRC_STMT to do the load with the same VUSE, which can lead to CUR_STMT
895 changing basic block.  */
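/* As an illustrative sketch (hypothetical GIMPLE, SSA names invented): if
   the rhs of CUR_STMT was recognized as a byte-swapped 32-bit value loaded
   from memory location S, the replacement built here is roughly

     load_dst_1 = MEM[&S];
     bswapdst_2 = __builtin_bswap32 (load_dst_1);
     lhs = bswapdst_2;

   where the load reuses the vuse recorded in N and the conversions to and
   from BSWAP_TYPE are emitted only when needed.  */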
898 bswap_replace (gimple
*cur_stmt
, gimple
*ins_stmt
, tree fndecl
,
899 tree bswap_type
, tree load_type
, struct symbolic_number
*n
,
902 gimple_stmt_iterator gsi
;
906 gsi
= gsi_for_stmt (cur_stmt
);
908 tgt
= gimple_assign_lhs (cur_stmt
);
910 /* Need to load the value from memory first. */
913 gimple_stmt_iterator gsi_ins
= gsi_for_stmt (ins_stmt
);
914 tree addr_expr
, addr_tmp
, val_expr
, val_tmp
;
915 tree load_offset_ptr
, aligned_load_type
;
916 gimple
*addr_stmt
, *load_stmt
;
918 HOST_WIDE_INT load_offset
= 0;
919 basic_block ins_bb
, cur_bb
;
921 ins_bb
= gimple_bb (ins_stmt
);
922 cur_bb
= gimple_bb (cur_stmt
);
923 if (!dominated_by_p (CDI_DOMINATORS
, cur_bb
, ins_bb
))
926 align
= get_object_alignment (src
);
928 /* Move cur_stmt just before one of the loads of the original
929 to ensure it has the same VUSE.  See PR61517 for what could
go wrong.  */
931 if (gimple_bb (cur_stmt
) != gimple_bb (ins_stmt
))
932 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt
));
933 gsi_move_before (&gsi
, &gsi_ins
);
934 gsi
= gsi_for_stmt (cur_stmt
);
936 /* Compute address to load from and cast according to the size
938 addr_expr
= build_fold_addr_expr (unshare_expr (src
));
939 if (is_gimple_mem_ref_addr (addr_expr
))
940 addr_tmp
= addr_expr
;
943 addr_tmp
= make_temp_ssa_name (TREE_TYPE (addr_expr
), NULL
,
945 addr_stmt
= gimple_build_assign (addr_tmp
, addr_expr
);
946 gsi_insert_before (&gsi
, addr_stmt
, GSI_SAME_STMT
);
949 /* Perform the load. */
950 aligned_load_type
= load_type
;
951 if (align
< TYPE_ALIGN (load_type
))
952 aligned_load_type
= build_aligned_type (load_type
, align
);
953 load_offset_ptr
= build_int_cst (n
->alias_set
, load_offset
);
954 val_expr
= fold_build2 (MEM_REF
, aligned_load_type
, addr_tmp
,
960 nop_stats
.found_16bit
++;
961 else if (n
->range
== 32)
962 nop_stats
.found_32bit
++;
965 gcc_assert (n
->range
== 64);
966 nop_stats
.found_64bit
++;
969 /* Convert the result of load if necessary. */
970 if (!useless_type_conversion_p (TREE_TYPE (tgt
), load_type
))
972 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
,
974 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
975 gimple_set_vuse (load_stmt
, n
->vuse
);
976 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
977 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, val_tmp
);
981 gimple_assign_set_rhs_with_ops (&gsi
, MEM_REF
, val_expr
);
982 gimple_set_vuse (cur_stmt
, n
->vuse
);
984 update_stmt (cur_stmt
);
989 "%d bit load in target endianness found at: ",
991 print_gimple_stmt (dump_file
, cur_stmt
, 0);
997 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
, "load_dst");
998 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
999 gimple_set_vuse (load_stmt
, n
->vuse
);
1000 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1007 if (!useless_type_conversion_p (TREE_TYPE (tgt
), TREE_TYPE (src
)))
1009 if (!is_gimple_val (src
))
1011 g
= gimple_build_assign (tgt
, NOP_EXPR
, src
);
1014 g
= gimple_build_assign (tgt
, src
);
1016 nop_stats
.found_16bit
++;
1017 else if (n
->range
== 32)
1018 nop_stats
.found_32bit
++;
1021 gcc_assert (n
->range
== 64);
1022 nop_stats
.found_64bit
++;
1027 "%d bit reshuffle in target endianness found at: ",
1029 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1031 gsi_replace (&gsi
, g
, true);
1034 else if (TREE_CODE (src
) == BIT_FIELD_REF
)
1035 src
= TREE_OPERAND (src
, 0);
1038 bswap_stats
.found_16bit
++;
1039 else if (n
->range
== 32)
1040 bswap_stats
.found_32bit
++;
1043 gcc_assert (n
->range
== 64);
1044 bswap_stats
.found_64bit
++;
1049 /* Convert the src expression if necessary. */
1050 if (!useless_type_conversion_p (TREE_TYPE (tmp
), bswap_type
))
1052 gimple
*convert_stmt
;
1054 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapsrc");
1055 convert_stmt
= gimple_build_assign (tmp
, NOP_EXPR
, src
);
1056 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1059 /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit values
1060 are considered, as rotation of 2N-bit values by N bits is generally not
1061 equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
1062 gives 0x03040102 while a bswap for that value is 0x04030201. */
1063 if (bswap
&& n
->range
== 16)
1065 tree count
= build_int_cst (NULL
, BITS_PER_UNIT
);
1066 src
= fold_build2 (LROTATE_EXPR
, bswap_type
, tmp
, count
);
1067 bswap_stmt
= gimple_build_assign (NULL
, src
);
1070 bswap_stmt
= gimple_build_call (fndecl
, 1, tmp
);
1074 /* Convert the result if necessary. */
1075 if (!useless_type_conversion_p (TREE_TYPE (tgt
), bswap_type
))
1077 gimple
*convert_stmt
;
1079 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapdst");
1080 convert_stmt
= gimple_build_assign (tgt
, NOP_EXPR
, tmp
);
1081 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1084 gimple_set_lhs (bswap_stmt
, tmp
);
1088 fprintf (dump_file
, "%d bit bswap implementation found at: ",
1090 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1093 gsi_insert_after (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
1094 gsi_remove (&gsi
, true);
1098 /* Find manual byte swap implementations as well as loads in a given
1099 endianness.  Byte swaps are turned into a bswap builtin invocation
1100 while endian loads are converted to a bswap builtin invocation or a
1101 simple load according to the target endianness. */
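/* For illustration (a sketch, not from the surrounding code), a hand-written
   little-endian 32-bit read such as:

     uint32_t
     read_le32 (const unsigned char *p)
     {
       return (uint32_t) p[0]
	      | ((uint32_t) p[1] << 8)
	      | ((uint32_t) p[2] << 16)
	      | ((uint32_t) p[3] << 24);
     }

   becomes a plain 32-bit load on little-endian targets and a load followed by
   __builtin_bswap32 on big-endian targets (when the builtin is available).  */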
1104 pass_optimize_bswap::execute (function *fun)
1107   bool bswap32_p, bswap64_p;
1108   bool changed = false;
1109   tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1111 bswap32_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
1112 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
1113 bswap64_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
1114 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
1115 || (bswap32_p
&& word_mode
== SImode
)));
1117 /* Determine the argument type of the builtins. The code later on
1118 assumes that the return and argument type are the same. */
1121 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1122 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1127 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1128 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1131 memset (&nop_stats
, 0, sizeof (nop_stats
));
1132 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
1133 calculate_dominance_info (CDI_DOMINATORS
);
1135 FOR_EACH_BB_FN (bb
, fun
)
1137 gimple_stmt_iterator gsi
;
1139 /* We do a reverse scan for bswap patterns to make sure we get the
1140 widest match. As bswap pattern matching doesn't handle previously
1141 inserted smaller bswap replacements as sub-patterns, the wider
1142 variant wouldn't be detected. */
1143 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
);)
1145 gimple
*ins_stmt
, *cur_stmt
= gsi_stmt (gsi
);
1146 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
1147 enum tree_code code
;
1148 struct symbolic_number n
;
1151 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1152 might be moved to a different basic block by bswap_replace and gsi
1153 must not point to it if that's the case.  Moving the gsi_prev
1154 there makes sure that gsi points to the statement previous to
1155 cur_stmt while still making sure that all statements are
1156 considered in this basic block. */
1159 if (!is_gimple_assign (cur_stmt
))
1162 code
= gimple_assign_rhs_code (cur_stmt
);
1167 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt
))
1168 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt
))
1178 ins_stmt
= find_bswap_or_nop (cur_stmt
, &n
, &bswap
);
1186 /* Already in canonical form, nothing to do. */
1187 if (code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
1189 load_type
= bswap_type
= uint16_type_node
;
1192 load_type
= uint32_type_node
;
1195 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1196 bswap_type
= bswap32_type
;
1200 load_type
= uint64_type_node
;
1203 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1204 bswap_type
= bswap64_type
;
1211 if (bswap
&& !fndecl
&& n
.range
!= 16)
1214 if (bswap_replace (cur_stmt
, ins_stmt
, fndecl
, bswap_type
, load_type
,
1220 statistics_counter_event (fun
, "16-bit nop implementations found",
1221 nop_stats
.found_16bit
);
1222 statistics_counter_event (fun
, "32-bit nop implementations found",
1223 nop_stats
.found_32bit
);
1224 statistics_counter_event (fun
, "64-bit nop implementations found",
1225 nop_stats
.found_64bit
);
1226 statistics_counter_event (fun
, "16-bit bswap implementations found",
1227 bswap_stats
.found_16bit
);
1228 statistics_counter_event (fun
, "32-bit bswap implementations found",
1229 bswap_stats
.found_32bit
);
1230 statistics_counter_event (fun
, "64-bit bswap implementations found",
1231 bswap_stats
.found_64bit
);
1233 return (changed
? TODO_update_ssa
: 0);
1239 make_pass_optimize_bswap (gcc::context
*ctxt
)
1241 return new pass_optimize_bswap (ctxt
);
1246 /* Struct recording one operand for the store, which is either a constant,
1247 then VAL represents the constant and all the other fields are zero,
1248 or a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1249 and the other fields also reflect the memory load. */
1251 struct store_operand_info
1255 unsigned HOST_WIDE_INT bitsize
;
1256 unsigned HOST_WIDE_INT bitpos
;
1257 unsigned HOST_WIDE_INT bitregion_start
;
1258 unsigned HOST_WIDE_INT bitregion_end
;
1261 store_operand_info ();
1264 store_operand_info::store_operand_info ()
1265 : val (NULL_TREE
), base_addr (NULL_TREE
), bitsize (0), bitpos (0),
1266 bitregion_start (0), bitregion_end (0), stmt (NULL
), bit_not_p (false)
1270 /* Struct recording the information about a single store of an immediate
1271 to memory. These are created in the first phase and coalesced into
1272 merged_store_group objects in the second phase. */
1274 struct store_immediate_info
1276 unsigned HOST_WIDE_INT bitsize
;
1277 unsigned HOST_WIDE_INT bitpos
;
1278 unsigned HOST_WIDE_INT bitregion_start
;
1279 /* This is one past the last bit of the bit region. */
1280 unsigned HOST_WIDE_INT bitregion_end
;
1283 /* INTEGER_CST for constant stores, MEM_REF for memory copy or
1284 BIT_*_EXPR for logical bitwise operation. */
1285 enum tree_code rhs_code
;
1286 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1288 /* True if ops have been swapped and thus ops[1] represents
1289 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1291 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1292 just the first one. */
1293 store_operand_info ops
[2];
1294 store_immediate_info (unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1295 unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1296 gimple
*, unsigned int, enum tree_code
, bool,
1297 const store_operand_info
&,
1298 const store_operand_info
&);
1301 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs
,
1302 unsigned HOST_WIDE_INT bp
,
1303 unsigned HOST_WIDE_INT brs
,
1304 unsigned HOST_WIDE_INT bre
,
1307 enum tree_code rhscode
,
1309 const store_operand_info
&op0r
,
1310 const store_operand_info
&op1r
)
1311 : bitsize (bs
), bitpos (bp
), bitregion_start (brs
), bitregion_end (bre
),
1312 stmt (st
), order (ord
), rhs_code (rhscode
), bit_not_p (bitnotp
),
1313 ops_swapped_p (false)
1314 #if __cplusplus >= 201103L
1315 , ops
{ op0r
, op1r
}
1325 /* Struct representing a group of stores to contiguous memory locations.
1326 These are produced by the second phase (coalescing) and consumed in the
1327 third phase that outputs the widened stores. */
1329 struct merged_store_group
1331 unsigned HOST_WIDE_INT start
;
1332 unsigned HOST_WIDE_INT width
;
1333 unsigned HOST_WIDE_INT bitregion_start
;
1334 unsigned HOST_WIDE_INT bitregion_end
;
1335 /* The size of the allocated memory for val and mask. */
1336 unsigned HOST_WIDE_INT buf_size
;
1337 unsigned HOST_WIDE_INT align_base
;
1338 unsigned HOST_WIDE_INT load_align_base
[2];
1341 unsigned int load_align
[2];
1342 unsigned int first_order
;
1343 unsigned int last_order
;
1345 auto_vec
<store_immediate_info
*> stores
;
1346 /* We record the first and last original statements in the sequence because
1347 we'll need their vuse/vdef and replacement position. It's easier to keep
1348 track of them separately as 'stores' is reordered by apply_stores. */
1352 unsigned char *mask
;
1354 merged_store_group (store_immediate_info
*);
1355 ~merged_store_group ();
1356 void merge_into (store_immediate_info
*);
1357 void merge_overlapping (store_immediate_info
*);
1358 bool apply_stores ();
1360 void do_merge (store_immediate_info
*);
1363 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1366 dump_char_array (FILE *fd
, unsigned char *ptr
, unsigned int len
)
1371 for (unsigned int i
= 0; i
< len
; i
++)
1372 fprintf (fd
, "%x ", ptr
[i
]);
1376 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
1377 bits between adjacent elements.  AMNT should be within
[0, BITS_PER_UNIT).
Example, AMNT = 2:
1380 00011111|11100000 << 2 = 01111111|10000000
1381 PTR[1] | PTR[0] PTR[1] | PTR[0]. */
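/* A small usage sketch (illustrative only) restating the example above:

     unsigned char buf[2] = { 0xe0, 0x1f };
     shift_bytes_in_array (buf, 2, 2);
     // now buf[0] == 0x80 and buf[1] == 0x7f

   i.e. the two-byte little-endian value 0x1fe0 shifted left by two bits.  */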
1384 shift_bytes_in_array (unsigned char *ptr
, unsigned int sz
, unsigned int amnt
)
1389 unsigned char carry_over
= 0U;
1390 unsigned char carry_mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- amnt
);
1391 unsigned char clear_mask
= (~0U) << amnt
;
1393 for (unsigned int i
= 0; i
< sz
; i
++)
1395 unsigned prev_carry_over
= carry_over
;
1396 carry_over
= (ptr
[i
] & carry_mask
) >> (BITS_PER_UNIT
- amnt
);
1401 ptr
[i
] &= clear_mask
;
1402 ptr
[i
] |= prev_carry_over
;
1407 /* Like shift_bytes_in_array but for big-endian.
1408 Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
1409 bits between adjacent elements.  AMNT should be within
[0, BITS_PER_UNIT).
Example, AMNT = 2:
1412 00011111|11100000 >> 2 = 00000111|11111000
1413 PTR[0] | PTR[1] PTR[0] | PTR[1]. */
1416 shift_bytes_in_array_right (unsigned char *ptr
, unsigned int sz
,
1422 unsigned char carry_over
= 0U;
1423 unsigned char carry_mask
= ~(~0U << amnt
);
1425 for (unsigned int i
= 0; i
< sz
; i
++)
1427 unsigned prev_carry_over
= carry_over
;
1428 carry_over
= ptr
[i
] & carry_mask
;
1430 carry_over
<<= (unsigned char) BITS_PER_UNIT
- amnt
;
1432 ptr
[i
] |= prev_carry_over
;
1436 /* Clear out LEN bits starting from bit START in the byte array
1437 PTR. This clears the bits to the *right* from START.
1438 START must be within [0, BITS_PER_UNIT) and counts starting from
1439 the least significant bit. */
1442 clear_bit_region_be (unsigned char *ptr
, unsigned int start
,
1447 /* Clear len bits to the right of start. */
1448 else if (len
<= start
+ 1)
1450 unsigned char mask
= (~(~0U << len
));
1451 mask
= mask
<< (start
+ 1U - len
);
1454 else if (start
!= BITS_PER_UNIT
- 1)
1456 clear_bit_region_be (ptr
, start
, (start
% BITS_PER_UNIT
) + 1);
1457 clear_bit_region_be (ptr
+ 1, BITS_PER_UNIT
- 1,
1458 len
- (start
% BITS_PER_UNIT
) - 1);
1460 else if (start
== BITS_PER_UNIT
- 1
1461 && len
> BITS_PER_UNIT
)
1463 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
1464 memset (ptr
, 0, nbytes
);
1465 if (len
% BITS_PER_UNIT
!= 0)
1466 clear_bit_region_be (ptr
+ nbytes
, BITS_PER_UNIT
- 1,
1467 len
% BITS_PER_UNIT
);
1473 /* In the byte array PTR clear the bit region starting at bit
1474 START that is LEN bits wide.
1475 For regions spanning multiple bytes do this recursively until we reach
1476 zero LEN or a region contained within a single byte. */
1479 clear_bit_region (unsigned char *ptr
, unsigned int start
,
1482 /* Degenerate base case. */
1485 else if (start
>= BITS_PER_UNIT
)
1486 clear_bit_region (ptr
+ 1, start
- BITS_PER_UNIT
, len
);
1487 /* Second base case. */
1488 else if ((start
+ len
) <= BITS_PER_UNIT
)
1490 unsigned char mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- len
);
1491 mask
>>= BITS_PER_UNIT
- (start
+ len
);
1497 /* Clear most significant bits in a byte and proceed with the next byte. */
1498 else if (start
!= 0)
1500 clear_bit_region (ptr
, start
, BITS_PER_UNIT
- start
);
1501 clear_bit_region (ptr
+ 1, 0, len
- (BITS_PER_UNIT
- start
));
1503 /* Whole bytes need to be cleared. */
1504 else if (start
== 0 && len
> BITS_PER_UNIT
)
1506 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
1507 /* We could recurse on each byte but we clear whole bytes, so a simple
1509 memset (ptr
, '\0', nbytes
);
1510 /* Clear the remaining sub-byte region if there is one. */
1511 if (len
% BITS_PER_UNIT
!= 0)
1512 clear_bit_region (ptr
+ nbytes
, 0, len
% BITS_PER_UNIT
);
1518 /* Write BITLEN bits of EXPR to the byte array PTR at
1519 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1520 Return true if the operation succeeded. */
1523 encode_tree_to_bitpos (tree expr
, unsigned char *ptr
, int bitlen
, int bitpos
,
1524 unsigned int total_bytes
)
1526 unsigned int first_byte
= bitpos
/ BITS_PER_UNIT
;
1527 tree tmp_int
= expr
;
1528 bool sub_byte_op_p
= ((bitlen
% BITS_PER_UNIT
)
1529 || (bitpos
% BITS_PER_UNIT
)
1530 || !int_mode_for_size (bitlen
, 0).exists ());
1533 return native_encode_expr (tmp_int
, ptr
+ first_byte
, total_bytes
) != 0;
1536 We are writing a non byte-sized quantity or at a position that is not
at a byte boundary.
1538 |--------|--------|--------| ptr + first_byte
1540 xxx xxxxxxxx xxx< bp>
1543 First native_encode_expr EXPR into a temporary buffer and shift each
1544 byte in the buffer by 'bp' (carrying the bits over as necessary).
1545 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1546 <------bitlen---->< bp>
1547 Then we clear the destination bits:
1548 |---00000|00000000|000-----| ptr + first_byte
1549 <-------bitlen--->< bp>
1551 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1552 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1555 We are writing a non byte-sized quantity or at a position that is not
at a byte boundary.
1557 ptr + first_byte |--------|--------|--------|
1559 <bp >xxx xxxxxxxx xxx
1562 First native_encode_expr EXPR into a temporary buffer and shift each
1563 byte in the buffer to the right by (carrying the bits over as necessary).
1564 We shift by as much as needed to align the most significant bit of EXPR
1566 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1567 <---bitlen----> <bp ><-----bitlen----->
1568 Then we clear the destination bits:
1569 ptr + first_byte |-----000||00000000||00000---|
1570 <bp ><-------bitlen----->
1572 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1573 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1574 The awkwardness comes from the fact that bitpos is counted from the
1575 most significant bit of a byte. */
1577 /* We must be dealing with fixed-size data at this point, since the
1578 total size is also fixed. */
1579 fixed_size_mode mode
= as_a
<fixed_size_mode
> (TYPE_MODE (TREE_TYPE (expr
)));
1580 /* Allocate an extra byte so that we have space to shift into. */
1581 unsigned int byte_size
= GET_MODE_SIZE (mode
) + 1;
1582 unsigned char *tmpbuf
= XALLOCAVEC (unsigned char, byte_size
);
1583 memset (tmpbuf
, '\0', byte_size
);
1584 /* The store detection code should only have allowed constants that are
1585 accepted by native_encode_expr. */
1586 if (native_encode_expr (expr
, tmpbuf
, byte_size
- 1) == 0)
1589 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1590 bytes to write. This means it can write more than
1591 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1592 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1593 bitlen and zero out the bits that are not relevant as well (that may
1594 contain a sign bit due to sign-extension). */
1595 unsigned int padding
1596 = byte_size
- ROUND_UP (bitlen
, BITS_PER_UNIT
) / BITS_PER_UNIT
- 1;
1597 /* On big-endian the padding is at the 'front' so just skip the initial
1599 if (BYTES_BIG_ENDIAN
)
1602 byte_size
-= padding
;
1604 if (bitlen
% BITS_PER_UNIT
!= 0)
1606 if (BYTES_BIG_ENDIAN
)
1607 clear_bit_region_be (tmpbuf
, BITS_PER_UNIT
- 1,
1608 BITS_PER_UNIT
- (bitlen
% BITS_PER_UNIT
));
1610 clear_bit_region (tmpbuf
, bitlen
,
1611 byte_size
* BITS_PER_UNIT
- bitlen
);
1613 /* Left shifting relies on the last byte being clear if bitlen is
1614 a multiple of BITS_PER_UNIT, which might not be clear if
1615 there are padding bytes. */
1616 else if (!BYTES_BIG_ENDIAN
)
1617 tmpbuf
[byte_size
- 1] = '\0';
1619 /* Clear the bit region in PTR where the bits from TMPBUF will be
1621 if (BYTES_BIG_ENDIAN
)
1622 clear_bit_region_be (ptr
+ first_byte
,
1623 BITS_PER_UNIT
- 1 - (bitpos
% BITS_PER_UNIT
), bitlen
);
1625 clear_bit_region (ptr
+ first_byte
, bitpos
% BITS_PER_UNIT
, bitlen
);
1628 int bitlen_mod
= bitlen
% BITS_PER_UNIT
;
1629 int bitpos_mod
= bitpos
% BITS_PER_UNIT
;
1631 bool skip_byte
= false;
1632 if (BYTES_BIG_ENDIAN
)
1634 /* BITPOS and BITLEN are exactly aligned and no shifting
1636 if (bitpos_mod
+ bitlen_mod
== BITS_PER_UNIT
1637 || (bitpos_mod
== 0 && bitlen_mod
== 0))
1639 /* |. . . . . . . .|
1641 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1642 of the value until it aligns with 'bp' in the next byte over. */
1643 else if (bitpos_mod
+ bitlen_mod
< BITS_PER_UNIT
)
1645 shift_amnt
= bitlen_mod
+ bitpos_mod
;
1646 skip_byte
= bitlen_mod
!= 0;
1648 /* |. . . . . . . .|
1651 Shift the value right within the same byte so it aligns with 'bp'. */
1653 shift_amnt
= bitlen_mod
+ bitpos_mod
- BITS_PER_UNIT
;
1656 shift_amnt
= bitpos
% BITS_PER_UNIT
;
1658 /* Create the shifted version of EXPR. */
1659 if (!BYTES_BIG_ENDIAN
)
1661 shift_bytes_in_array (tmpbuf
, byte_size
, shift_amnt
);
1662 if (shift_amnt
== 0)
1667 gcc_assert (BYTES_BIG_ENDIAN
);
1668 shift_bytes_in_array_right (tmpbuf
, byte_size
, shift_amnt
);
1669 /* If shifting right forced us to move into the next byte skip the now
1678 /* Insert the bits from TMPBUF. */
1679 for (unsigned int i
= 0; i
< byte_size
; i
++)
1680 ptr
[first_byte
+ i
] |= tmpbuf
[i
];
1685 /* Sorting function for store_immediate_info objects.
1686 Sorts them by bitposition. */
1689 sort_by_bitpos (const void *x
, const void *y
)
1691 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1692 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1694 if ((*tmp
)->bitpos
< (*tmp2
)->bitpos
)
1696 else if ((*tmp
)->bitpos
> (*tmp2
)->bitpos
)
1699 /* If they are the same let's use the order which is guaranteed to
be different.  */
1701 return (*tmp
)->order
- (*tmp2
)->order
;
1704 /* Sorting function for store_immediate_info objects.
1705 Sorts them by the order field. */
1708 sort_by_order (const void *x
, const void *y
)
1710 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1711 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1713 if ((*tmp
)->order
< (*tmp2
)->order
)
1715 else if ((*tmp
)->order
> (*tmp2
)->order
)
1721 /* Initialize a merged_store_group object from a store_immediate_info
1724 merged_store_group::merged_store_group (store_immediate_info
*info
)
1726 start
= info
->bitpos
;
1727 width
= info
->bitsize
;
1728 bitregion_start
= info
->bitregion_start
;
1729 bitregion_end
= info
->bitregion_end
;
1730 /* VAL has memory allocated for it in apply_stores once the group
1731 width has been finalized. */
1734 unsigned HOST_WIDE_INT align_bitpos
= 0;
1735 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1736 &align
, &align_bitpos
);
1737 align_base
= start
- align_bitpos
;
1738 for (int i
= 0; i
< 2; ++i
)
1740 store_operand_info
&op
= info
->ops
[i
];
1741 if (op
.base_addr
== NULL_TREE
)
1744 load_align_base
[i
] = 0;
1748 get_object_alignment_1 (op
.val
, &load_align
[i
], &align_bitpos
);
1749 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1753 stores
.safe_push (info
);
1754 last_stmt
= info
->stmt
;
1755 last_order
= info
->order
;
1756 first_stmt
= last_stmt
;
1757 first_order
= last_order
;
1761 merged_store_group::~merged_store_group ()
1767 /* Helper method for merge_into and merge_overlapping to do
the common part.  */
1770 merged_store_group::do_merge (store_immediate_info
*info
)
1772 bitregion_start
= MIN (bitregion_start
, info
->bitregion_start
);
1773 bitregion_end
= MAX (bitregion_end
, info
->bitregion_end
);
1775 unsigned int this_align
;
1776 unsigned HOST_WIDE_INT align_bitpos
= 0;
1777 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1778 &this_align
, &align_bitpos
);
1779 if (this_align
> align
)
1782 align_base
= info
->bitpos
- align_bitpos
;
1784 for (int i
= 0; i
< 2; ++i
)
1786 store_operand_info
&op
= info
->ops
[i
];
1790 get_object_alignment_1 (op
.val
, &this_align
, &align_bitpos
);
1791 if (this_align
> load_align
[i
])
1793 load_align
[i
] = this_align
;
1794 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1798 gimple
*stmt
= info
->stmt
;
1799 stores
.safe_push (info
);
1800 if (info
->order
> last_order
)
1802 last_order
= info
->order
;
1805 else if (info
->order
< first_order
)
1807 first_order
= info
->order
;
1812 /* Merge a store recorded by INFO into this merged store.
1813 The store is not overlapping with the existing recorded
1817 merged_store_group::merge_into (store_immediate_info
*info
)
1819 unsigned HOST_WIDE_INT wid
= info
->bitsize
;
1820 /* Make sure we're inserting in the position we think we're inserting. */
1821 gcc_assert (info
->bitpos
>= start
+ width
1822 && info
->bitregion_start
<= bitregion_end
);
1828 /* Merge a store described by INFO into this merged store.
1829 INFO overlaps in some way with the current store (i.e. it's not contiguous
1830 which is handled by merged_store_group::merge_into). */
1833 merged_store_group::merge_overlapping (store_immediate_info
*info
)
1835 /* If the store extends the size of the group, extend the width. */
1836 if (info
->bitpos
+ info
->bitsize
> start
+ width
)
1837 width
+= info
->bitpos
+ info
->bitsize
- (start
+ width
);
1842 /* Go through all the recorded stores in this group in program order and
1843 apply their values to the VAL byte array to create the final merged
1844 value. Return true if the operation succeeded. */
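/* An illustrative sketch (not from the original comments) of VAL and MASK
   after applying two constant stores to a two-byte region, one writing 0x1
   to the first byte and one writing 0x2 to the second:

     val:  01 02
     mask: 00 00

   Any byte that no recorded store touches keeps 0xff in MASK, telling the
   output phase which parts of the region must be preserved.  */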
1847 merged_store_group::apply_stores ()
1849 /* Make sure we have more than one store in the group, otherwise we cannot
merge anything.  */
1851 if (bitregion_start
% BITS_PER_UNIT
!= 0
1852 || bitregion_end
% BITS_PER_UNIT
!= 0
1853 || stores
.length () == 1)
1856 stores
.qsort (sort_by_order
);
1857 store_immediate_info
*info
;
1859 /* Create a buffer of a size that is 2 times the number of bytes we're
1860 storing. That way native_encode_expr can write power-of-2-sized
1861 chunks without overrunning. */
1862 buf_size
= 2 * ((bitregion_end
- bitregion_start
) / BITS_PER_UNIT
);
1863 val
= XNEWVEC (unsigned char, 2 * buf_size
);
1864 mask
= val
+ buf_size
;
1865 memset (val
, 0, buf_size
);
1866 memset (mask
, ~0U, buf_size
);
1868 FOR_EACH_VEC_ELT (stores
, i
, info
)
1870 unsigned int pos_in_buffer
= info
->bitpos
- bitregion_start
;
1871 tree cst
= NULL_TREE
;
1872 if (info
->ops
[0].val
&& info
->ops
[0].base_addr
== NULL_TREE
)
1873 cst
= info
->ops
[0].val
;
1874 else if (info
->ops
[1].val
&& info
->ops
[1].base_addr
== NULL_TREE
)
1875 cst
= info
->ops
[1].val
;
1878 ret
= encode_tree_to_bitpos (cst
, val
, info
->bitsize
,
1879 pos_in_buffer
, buf_size
);
1880 if (cst
&& dump_file
&& (dump_flags
& TDF_DETAILS
))
1884 fprintf (dump_file
, "After writing ");
1885 print_generic_expr (dump_file
, cst
, 0);
1886 fprintf (dump_file
, " of size " HOST_WIDE_INT_PRINT_DEC
1887 " at position %d the merged region contains:\n",
1888 info
->bitsize
, pos_in_buffer
);
1889 dump_char_array (dump_file
, val
, buf_size
);
1892 fprintf (dump_file
, "Failed to merge stores\n");
1896 unsigned char *m
= mask
+ (pos_in_buffer
/ BITS_PER_UNIT
);
1897 if (BYTES_BIG_ENDIAN
)
1898 clear_bit_region_be (m
, (BITS_PER_UNIT
- 1
1899 - (pos_in_buffer
% BITS_PER_UNIT
)),
1902 clear_bit_region (m
, pos_in_buffer
% BITS_PER_UNIT
, info
->bitsize
);
1907 /* Structure describing the store chain. */
1909 struct imm_store_chain_info
1911 /* Doubly-linked list that imposes an order on chain processing.
1912 PNXP (prev's next pointer) points to the head of a list, or to
1913 the next field in the previous chain in the list.
1914 See pass_store_merging::m_stores_head for more rationale. */
1915 imm_store_chain_info
*next
, **pnxp
;
1917 auto_vec
<store_immediate_info
*> m_store_info
;
1918 auto_vec
<merged_store_group
*> m_merged_store_groups
;
1920 imm_store_chain_info (imm_store_chain_info
*&inspt
, tree b_a
)
1921 : next (inspt
), pnxp (&inspt
), base_addr (b_a
)
1926 gcc_checking_assert (pnxp
== next
->pnxp
);
1930 ~imm_store_chain_info ()
1935 gcc_checking_assert (&next
== next
->pnxp
);
1939 bool terminate_and_process_chain ();
1940 bool coalesce_immediate_stores ();
1941 bool output_merged_store (merged_store_group
*);
1942 bool output_merged_stores ();
1945 const pass_data pass_data_tree_store_merging
= {
1946 GIMPLE_PASS
, /* type */
1947 "store-merging", /* name */
1948 OPTGROUP_NONE
, /* optinfo_flags */
1949 TV_GIMPLE_STORE_MERGING
, /* tv_id */
1950 PROP_ssa
, /* properties_required */
1951 0, /* properties_provided */
1952 0, /* properties_destroyed */
1953 0, /* todo_flags_start */
1954 TODO_update_ssa
, /* todo_flags_finish */
1957 class pass_store_merging
: public gimple_opt_pass
1960 pass_store_merging (gcc::context
*ctxt
)
1961 : gimple_opt_pass (pass_data_tree_store_merging
, ctxt
), m_stores_head ()
1965 /* Pass not supported for PDP-endianness, nor for insane hosts
1966 or target character sizes where native_{encode,interpret}_expr
1967 doesn't work properly. */
1971 return flag_store_merging
1972 && WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
1974 && BITS_PER_UNIT
== 8;
1977 virtual unsigned int execute (function
*);
1980 hash_map
<tree_operand_hash
, struct imm_store_chain_info
*> m_stores
;
1982 /* Form a doubly-linked stack of the elements of m_stores, so that
1983 we can iterate over them in a predictable way. Using this order
1984 avoids extraneous differences in the compiler output just because
1985 of tree pointer variations (e.g. different chains end up in
1986 different positions of m_stores, so they are handled in different
1987 orders, so they allocate or release SSA names in different
1988 orders, and when they get reused, subsequent passes end up
1989 getting different SSA names, which may ultimately change
1990 decisions when going out of SSA). */
1991 imm_store_chain_info
*m_stores_head
;
1993 void process_store (gimple
*);
1994 bool terminate_and_process_all_chains ();
1995 bool terminate_all_aliasing_chains (imm_store_chain_info
**, gimple
*);
1996 bool terminate_and_release_chain (imm_store_chain_info
*);
1997 }; // class pass_store_merging
1999 /* Terminate and process all recorded chains. Return true if any changes
2003 pass_store_merging::terminate_and_process_all_chains ()
2006 while (m_stores_head
)
2007 ret
|= terminate_and_release_chain (m_stores_head
);
2008 gcc_assert (m_stores
.elements () == 0);
2009 gcc_assert (m_stores_head
== NULL
);
/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
						     **chain_info,
						   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
	 chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
	continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (ref_maybe_used_by_stmt_p (stmt, lhs)
	      || stmt_may_clobber_ref_p (stmt, lhs)
	      || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "stmt causes chain termination:\n");
		  print_gimple_stmt (dump_file, stmt, 0);
		}
	      terminate_and_release_chain (cur);
	      ret = true;
	      break;
	    }
	}
    }

  return ret;
}
/* Helper function.  Terminate the recorded chain storing to base object
   BASE.  Return true if the merging and output was successful.  The m_stores
   entry is removed after the processing in any case.  */

bool
pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}
/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must be in the same basic block and
   have non-NULL vdef.  */

bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
	return true;
      /* Avoid quadratic compile time by bounding the number of checks
	 we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
	return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);
  return false;
}
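
/* Illustrative sketch (hypothetical statements, not part of the pass): the
   loop above walks virtual operands backwards from LAST towards FIRST, so
   given a group whose first and last member stores bracket another store,
   e.g.

     p->a = _1;   // first store of the group, _1 was loaded from q->a
     *r = 0;      // in between; may clobber q->a if r can alias q
     p->b = _2;   // last store of the group, _2 was loaded from q->b

   stmts_may_clobber_ref_p (first, last, q->a) reports a possible clobber
   unless the alias oracle can prove *r is disjoint from q->a.  */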
/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

static bool
compatible_load_p (merged_store_group *merged_store,
		   store_immediate_info *info,
		   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || (info->ops[idx].bitpos - infof->ops[idx].bitpos
	  != info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
			   infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
	  && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (info->ops[idx].bitpos == info->bitpos
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the earlier processed loads clobber with the stmts in the new
     range.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
	  return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
	  return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}
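
/* Illustrative sketch (hypothetical source, not part of the pass): the checks
   above accept a sequence such as

     t->a = s->a ^ 0x0f;
     t->b = s->b ^ 0x0f;

   because both loads read adjacent locations off the same base S and share a
   virtual use, whereas

     t->a = s->a;
     s->b = 1;      // intervening store changes the vuse of the next load
     t->b = s->b;

   may only be merged if the clobber checks above can show the loads are
   unaffected by the statements in between.  */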
2195 /* Go through the candidate stores recorded in m_store_info and merge them
2196 into merged_store_group objects recorded into m_merged_store_groups
2197 representing the widened stores. Return true if coalescing was successful
2198 and the number of widened stores is fewer than the original number
2202 imm_store_chain_info::coalesce_immediate_stores ()
2204 /* Anything less can't be processed. */
2205 if (m_store_info
.length () < 2)
2208 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2209 fprintf (dump_file
, "Attempting to coalesce %u stores in chain.\n",
2210 m_store_info
.length ());
2212 store_immediate_info
*info
;
2215 /* Order the stores by the bitposition they write to. */
2216 m_store_info
.qsort (sort_by_bitpos
);
2218 info
= m_store_info
[0];
2219 merged_store_group
*merged_store
= new merged_store_group (info
);
2221 FOR_EACH_VEC_ELT (m_store_info
, i
, info
)
2223 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2225 fprintf (dump_file
, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
2226 " bitpos:" HOST_WIDE_INT_PRINT_DEC
" val:\n",
2227 i
, info
->bitsize
, info
->bitpos
);
2228 print_generic_expr (dump_file
, gimple_assign_rhs1 (info
->stmt
));
2229 fprintf (dump_file
, "\n------------\n");
2237 Overlapping stores. */
2238 unsigned HOST_WIDE_INT start
= info
->bitpos
;
2239 if (IN_RANGE (start
, merged_store
->start
,
2240 merged_store
->start
+ merged_store
->width
- 1))
2242 /* Only allow overlapping stores of constants. */
2243 if (info
->rhs_code
== INTEGER_CST
2244 && merged_store
->stores
[0]->rhs_code
== INTEGER_CST
)
2246 merged_store
->merge_overlapping (info
);
2250 /* |---store 1---||---store 2---|
2251 This store is consecutive to the previous one.
2252 Merge it into the current store group. There can be gaps in between
2253 the stores, but there can't be gaps in between bitregions. */
2254 else if (info
->bitregion_start
<= merged_store
->bitregion_end
2255 && info
->rhs_code
== merged_store
->stores
[0]->rhs_code
)
2257 store_immediate_info
*infof
= merged_store
->stores
[0];
2259 /* All the rhs_code ops that take 2 operands are commutative,
2260 swap the operands if it could make the operands compatible. */
2261 if (infof
->ops
[0].base_addr
2262 && infof
->ops
[1].base_addr
2263 && info
->ops
[0].base_addr
2264 && info
->ops
[1].base_addr
2265 && (info
->ops
[1].bitpos
- infof
->ops
[0].bitpos
2266 == info
->bitpos
- infof
->bitpos
)
2267 && operand_equal_p (info
->ops
[1].base_addr
,
2268 infof
->ops
[0].base_addr
, 0))
2270 std::swap (info
->ops
[0], info
->ops
[1]);
2271 info
->ops_swapped_p
= true;
2273 if ((infof
->ops
[0].base_addr
2274 ? compatible_load_p (merged_store
, info
, base_addr
, 0)
2275 : !info
->ops
[0].base_addr
)
2276 && (infof
->ops
[1].base_addr
2277 ? compatible_load_p (merged_store
, info
, base_addr
, 1)
2278 : !info
->ops
[1].base_addr
))
2280 merged_store
->merge_into (info
);
2285 /* |---store 1---| <gap> |---store 2---|.
2286 Gap between stores or the rhs not compatible. Start a new group. */
2288 /* Try to apply all the stores recorded for the group to determine
2289 the bitpattern they write and discard it if that fails.
2290 This will also reject single-store groups. */
2291 if (!merged_store
->apply_stores ())
2292 delete merged_store
;
2294 m_merged_store_groups
.safe_push (merged_store
);
2296 merged_store
= new merged_store_group (info
);
2299 /* Record or discard the last store group. */
2300 if (!merged_store
->apply_stores ())
2301 delete merged_store
;
2303 m_merged_store_groups
.safe_push (merged_store
);
2305 gcc_assert (m_merged_store_groups
.length () <= m_store_info
.length ());
2307 = !m_merged_store_groups
.is_empty ()
2308 && m_merged_store_groups
.length () < m_store_info
.length ();
2310 if (success
&& dump_file
)
2311 fprintf (dump_file
, "Coalescing successful!\n"
2312 "Merged into %u stores\n",
2313 m_merged_store_groups
.length ());
/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
			  unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
			 : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
	{
	  if (TREE_CODE (base) == MEM_REF)
	    {
	      *cliquep = MR_DEPENDENCE_CLIQUE (base);
	      *basep = MR_DEPENDENCE_BASE (base);
	    }
	  ret = type = type1;
	  continue;
	}
      if (!alias_ptr_types_compatible_p (type, type1))
	ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
	  || *cliquep != MR_DEPENDENCE_CLIQUE (base)
	  || *basep != MR_DEPENDENCE_BASE (base))
	{
	  *cliquep = 0;
	  *basep = 0;
	}
    }
  return ret;
}

/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}
/* Used to describe a store resulting from splitting a wide store in smaller
   regularly-sized stores in split_group.  */

struct split_store
{
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	       unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
			  unsigned HOST_WIDE_INT sz,
			  unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}
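
/* Illustrative sketch (hypothetical numbers, not produced by the pass): a
   merged group covering six bytes starting at a four-byte-aligned position
   could be described by two split_store records, e.g.

     split_store (0, 32, 32);   // bytes 0-3 as one aligned 4-byte store
     split_store (4, 16, 16);   // bytes 4-5 as one aligned 2-byte store

   assuming unaligned stores are not allowed; split_group below derives the
   actual positions and sizes from the group's mask, alignment and
   MAX_STORE_BITSIZE.  */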
/* Record all stores in GROUP that write to the region starting at BITPOS
   and of size BITSIZE.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return INFO
   if there is exactly one original store in the range.  */

static store_immediate_info *
find_constituent_stores (struct merged_store_group *group,
			 vec<store_immediate_info *> *stores,
			 unsigned int *first,
			 unsigned HOST_WIDE_INT bitpos,
			 unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
	{
	  /* BITPOS passed to this function never decreases from within the
	     same split_group call, so optimize and don't scan info records
	     which are known to end before or at BITPOS next time.
	     Only do it if all stores before this one also pass this.  */
	  if (update_first)
	    *first = i + 1;
	  continue;
	}
      else
	update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
	 the region for this group return early.  */
      if (stmt_start >= end)
	return ret;

      if (stores)
	{
	  stores->safe_push (info);
	  if (ret)
	    {
	      ret = NULL;
	      second = true;
	    }
	}
      else if (ret)
	return NULL;
      if (!second)
	ret = info;
    }
  return ret;
}
2461 /* Return how many SSA_NAMEs used to compute value to store in the INFO
2462 store have multiple uses. If any SSA_NAME has multiple uses, also
2463 count statements needed to compute it. */
2466 count_multiple_uses (store_immediate_info
*info
)
2468 gimple
*stmt
= info
->stmt
;
2470 switch (info
->rhs_code
)
2477 if (info
->bit_not_p
)
2479 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
2480 ret
= 1; /* Fall through below to return
2481 the BIT_NOT_EXPR stmt and then
2482 BIT_{AND,IOR,XOR}_EXPR and anything it
2485 /* stmt is after this the BIT_NOT_EXPR. */
2486 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
2488 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
2490 ret
+= 1 + info
->ops
[0].bit_not_p
;
2491 if (info
->ops
[1].base_addr
)
2492 ret
+= 1 + info
->ops
[1].bit_not_p
;
2495 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
2496 /* stmt is now the BIT_*_EXPR. */
2497 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
2498 ret
+= 1 + info
->ops
[info
->ops_swapped_p
].bit_not_p
;
2499 else if (info
->ops
[info
->ops_swapped_p
].bit_not_p
)
2501 gimple
*stmt2
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
2502 if (!has_single_use (gimple_assign_rhs1 (stmt2
)))
2505 if (info
->ops
[1].base_addr
== NULL_TREE
)
2507 gcc_checking_assert (!info
->ops_swapped_p
);
2510 if (!has_single_use (gimple_assign_rhs2 (stmt
)))
2511 ret
+= 1 + info
->ops
[1 - info
->ops_swapped_p
].bit_not_p
;
2512 else if (info
->ops
[1 - info
->ops_swapped_p
].bit_not_p
)
2514 gimple
*stmt2
= SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt
));
2515 if (!has_single_use (gimple_assign_rhs1 (stmt2
)))
2520 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
2521 return 1 + info
->ops
[0].bit_not_p
;
2522 else if (info
->ops
[0].bit_not_p
)
2524 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
2525 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
2534 /* Split a merged store described by GROUP by populating the SPLIT_STORES
2535 vector (if non-NULL) with split_store structs describing the byte offset
2536 (from the base), the bit size and alignment of each store as well as the
2537 original statements involved in each such split group.
2538 This is to separate the splitting strategy from the statement
2539 building/emission/linking done in output_merged_store.
2540 Return number of new stores.
2541 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
2542 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
2543 If SPLIT_STORES is NULL, it is just a dry run to count number of
2547 split_group (merged_store_group
*group
, bool allow_unaligned_store
,
2548 bool allow_unaligned_load
,
2549 vec
<struct split_store
*> *split_stores
,
2550 unsigned *total_orig
,
2551 unsigned *total_new
)
2553 unsigned HOST_WIDE_INT pos
= group
->bitregion_start
;
2554 unsigned HOST_WIDE_INT size
= group
->bitregion_end
- pos
;
2555 unsigned HOST_WIDE_INT bytepos
= pos
/ BITS_PER_UNIT
;
2556 unsigned HOST_WIDE_INT group_align
= group
->align
;
2557 unsigned HOST_WIDE_INT align_base
= group
->align_base
;
2558 unsigned HOST_WIDE_INT group_load_align
= group_align
;
2559 bool any_orig
= false;
2561 gcc_assert ((size
% BITS_PER_UNIT
== 0) && (pos
% BITS_PER_UNIT
== 0));
2563 unsigned int ret
= 0, first
= 0;
2564 unsigned HOST_WIDE_INT try_pos
= bytepos
;
2565 group
->stores
.qsort (sort_by_bitpos
);
2570 store_immediate_info
*info
= group
->stores
[0];
2573 total_orig
[0] = 1; /* The orig store. */
2574 info
= group
->stores
[0];
2575 if (info
->ops
[0].base_addr
)
2577 if (info
->ops
[1].base_addr
)
2579 switch (info
->rhs_code
)
2584 total_orig
[0]++; /* The orig BIT_*_EXPR stmt. */
2589 total_orig
[0] *= group
->stores
.length ();
2591 FOR_EACH_VEC_ELT (group
->stores
, i
, info
)
2593 total_new
[0] += count_multiple_uses (info
);
2594 total_orig
[0] += (info
->bit_not_p
2595 + info
->ops
[0].bit_not_p
2596 + info
->ops
[1].bit_not_p
);
2600 if (!allow_unaligned_load
)
2601 for (int i
= 0; i
< 2; ++i
)
2602 if (group
->load_align
[i
])
2603 group_load_align
= MIN (group_load_align
, group
->load_align
[i
]);
2607 if ((allow_unaligned_store
|| group_align
<= BITS_PER_UNIT
)
2608 && group
->mask
[try_pos
- bytepos
] == (unsigned char) ~0U)
2610 /* Skip padding bytes. */
2612 size
-= BITS_PER_UNIT
;
2616 unsigned HOST_WIDE_INT try_bitpos
= try_pos
* BITS_PER_UNIT
;
2617 unsigned int try_size
= MAX_STORE_BITSIZE
, nonmasked
;
2618 unsigned HOST_WIDE_INT align_bitpos
2619 = (try_bitpos
- align_base
) & (group_align
- 1);
2620 unsigned HOST_WIDE_INT align
= group_align
;
2622 align
= least_bit_hwi (align_bitpos
);
2623 if (!allow_unaligned_store
)
2624 try_size
= MIN (try_size
, align
);
2625 if (!allow_unaligned_load
)
2627 /* If we can't do or don't want to do unaligned stores
2628 as well as loads, we need to take the loads into account
2630 unsigned HOST_WIDE_INT load_align
= group_load_align
;
2631 align_bitpos
= (try_bitpos
- align_base
) & (load_align
- 1);
2633 load_align
= least_bit_hwi (align_bitpos
);
2634 for (int i
= 0; i
< 2; ++i
)
2635 if (group
->load_align
[i
])
2637 align_bitpos
= try_bitpos
- group
->stores
[0]->bitpos
;
2638 align_bitpos
+= group
->stores
[0]->ops
[i
].bitpos
;
2639 align_bitpos
-= group
->load_align_base
[i
];
2640 align_bitpos
&= (group_load_align
- 1);
2643 unsigned HOST_WIDE_INT a
= least_bit_hwi (align_bitpos
);
2644 load_align
= MIN (load_align
, a
);
2647 try_size
= MIN (try_size
, load_align
);
2649 store_immediate_info
*info
2650 = find_constituent_stores (group
, NULL
, &first
, try_bitpos
, try_size
);
2653 /* If there is just one original statement for the range, see if
2654 we can just reuse the original store which could be even larger
2656 unsigned HOST_WIDE_INT stmt_end
2657 = ROUND_UP (info
->bitpos
+ info
->bitsize
, BITS_PER_UNIT
);
2658 info
= find_constituent_stores (group
, NULL
, &first
, try_bitpos
,
2659 stmt_end
- try_bitpos
);
2660 if (info
&& info
->bitpos
>= try_bitpos
)
2662 try_size
= stmt_end
- try_bitpos
;
2667 /* Approximate store bitsize for the case when there are no padding
2669 while (try_size
> size
)
2671 /* Now look for whole padding bytes at the end of that bitsize. */
2672 for (nonmasked
= try_size
/ BITS_PER_UNIT
; nonmasked
> 0; --nonmasked
)
2673 if (group
->mask
[try_pos
- bytepos
+ nonmasked
- 1]
2674 != (unsigned char) ~0U)
2678 /* If entire try_size range is padding, skip it. */
2679 try_pos
+= try_size
/ BITS_PER_UNIT
;
2683 /* Otherwise try to decrease try_size if second half, last 3 quarters
2684 etc. are padding. */
2685 nonmasked
*= BITS_PER_UNIT
;
2686 while (nonmasked
<= try_size
/ 2)
2688 if (!allow_unaligned_store
&& group_align
> BITS_PER_UNIT
)
2690 /* Now look for whole padding bytes at the start of that bitsize. */
2691 unsigned int try_bytesize
= try_size
/ BITS_PER_UNIT
, masked
;
2692 for (masked
= 0; masked
< try_bytesize
; ++masked
)
2693 if (group
->mask
[try_pos
- bytepos
+ masked
] != (unsigned char) ~0U)
2695 masked
*= BITS_PER_UNIT
;
2696 gcc_assert (masked
< try_size
);
2697 if (masked
>= try_size
/ 2)
2699 while (masked
>= try_size
/ 2)
2702 try_pos
+= try_size
/ BITS_PER_UNIT
;
2706 /* Need to recompute the alignment, so just retry at the new
2717 struct split_store
*store
2718 = new split_store (try_pos
, try_size
, align
);
2719 info
= find_constituent_stores (group
, &store
->orig_stores
,
2720 &first
, try_bitpos
, try_size
);
2722 && info
->bitpos
>= try_bitpos
2723 && info
->bitpos
+ info
->bitsize
<= try_bitpos
+ try_size
)
2728 split_stores
->safe_push (store
);
2731 try_pos
+= try_size
/ BITS_PER_UNIT
;
2738 struct split_store
*store
;
2739 /* If we are reusing some original stores and any of the
2740 original SSA_NAMEs had multiple uses, we need to subtract
2741 those now before we add the new ones. */
2742 if (total_new
[0] && any_orig
)
2744 FOR_EACH_VEC_ELT (*split_stores
, i
, store
)
2746 total_new
[0] -= count_multiple_uses (store
->orig_stores
[0]);
2748 total_new
[0] += ret
; /* The new store. */
2749 store_immediate_info
*info
= group
->stores
[0];
2750 if (info
->ops
[0].base_addr
)
2751 total_new
[0] += ret
;
2752 if (info
->ops
[1].base_addr
)
2753 total_new
[0] += ret
;
2754 switch (info
->rhs_code
)
2759 total_new
[0] += ret
; /* The new BIT_*_EXPR stmt. */
2764 FOR_EACH_VEC_ELT (*split_stores
, i
, store
)
2767 bool bit_not_p
[3] = { false, false, false };
2768 /* If all orig_stores have certain bit_not_p set, then
2769 we'd use a BIT_NOT_EXPR stmt and need to account for it.
2770 If some orig_stores have certain bit_not_p set, then
2771 we'd use a BIT_XOR_EXPR with a mask and need to account for
2773 FOR_EACH_VEC_ELT (store
->orig_stores
, j
, info
)
2775 if (info
->ops
[0].bit_not_p
)
2776 bit_not_p
[0] = true;
2777 if (info
->ops
[1].bit_not_p
)
2778 bit_not_p
[1] = true;
2779 if (info
->bit_not_p
)
2780 bit_not_p
[2] = true;
2782 total_new
[0] += bit_not_p
[0] + bit_not_p
[1] + bit_not_p
[2];
2790 /* Return the operation through which the operand IDX (if < 2) or
2791 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
2792 is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
2793 the bits should be xored with mask. */
2795 static enum tree_code
2796 invert_op (split_store
*split_store
, int idx
, tree int_type
, tree
&mask
)
2799 store_immediate_info
*info
;
2800 unsigned int cnt
= 0;
2801 FOR_EACH_VEC_ELT (split_store
->orig_stores
, i
, info
)
2803 bool bit_not_p
= idx
< 2 ? info
->ops
[idx
].bit_not_p
: info
->bit_not_p
;
2810 if (cnt
== split_store
->orig_stores
.length ())
2811 return BIT_NOT_EXPR
;
2813 unsigned HOST_WIDE_INT try_bitpos
= split_store
->bytepos
* BITS_PER_UNIT
;
2814 unsigned buf_size
= split_store
->size
/ BITS_PER_UNIT
;
2816 = XALLOCAVEC (unsigned char, buf_size
);
2817 memset (buf
, ~0U, buf_size
);
2818 FOR_EACH_VEC_ELT (split_store
->orig_stores
, i
, info
)
2820 bool bit_not_p
= idx
< 2 ? info
->ops
[idx
].bit_not_p
: info
->bit_not_p
;
2823 /* Clear regions with bit_not_p and invert afterwards, rather than
2824 clear regions with !bit_not_p, so that gaps in between stores aren't
2826 unsigned HOST_WIDE_INT bitsize
= info
->bitsize
;
2827 unsigned int pos_in_buffer
= 0;
2828 if (info
->bitpos
< try_bitpos
)
2830 gcc_assert (info
->bitpos
+ bitsize
> try_bitpos
);
2831 bitsize
-= (try_bitpos
- info
->bitpos
);
2834 pos_in_buffer
= info
->bitpos
- try_bitpos
;
2835 if (pos_in_buffer
+ bitsize
> split_store
->size
)
2836 bitsize
= split_store
->size
- pos_in_buffer
;
2837 unsigned char *p
= buf
+ (pos_in_buffer
/ BITS_PER_UNIT
);
2838 if (BYTES_BIG_ENDIAN
)
2839 clear_bit_region_be (p
, (BITS_PER_UNIT
- 1
2840 - (pos_in_buffer
% BITS_PER_UNIT
)), bitsize
);
2842 clear_bit_region (p
, pos_in_buffer
% BITS_PER_UNIT
, bitsize
);
2844 for (unsigned int i
= 0; i
< buf_size
; ++i
)
2846 mask
= native_interpret_expr (int_type
, buf
, buf_size
);
2847 return BIT_XOR_EXPR
;
2850 /* Given a merged store group GROUP output the widened version of it.
2851 The store chain is against the base object BASE.
2852 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
2853 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
2854 Make sure that the number of statements output is less than the number of
2855 original statements. If a better sequence is possible emit it and
2859 imm_store_chain_info::output_merged_store (merged_store_group
*group
)
2861 unsigned HOST_WIDE_INT start_byte_pos
2862 = group
->bitregion_start
/ BITS_PER_UNIT
;
2864 unsigned int orig_num_stmts
= group
->stores
.length ();
2865 if (orig_num_stmts
< 2)
2868 auto_vec
<struct split_store
*, 32> split_stores
;
2869 split_stores
.create (0);
2870 bool allow_unaligned_store
2871 = !STRICT_ALIGNMENT
&& PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED
);
2872 bool allow_unaligned_load
= allow_unaligned_store
;
2873 if (allow_unaligned_store
)
2875 /* If unaligned stores are allowed, see how many stores we'd emit
2876 for unaligned and how many stores we'd emit for aligned stores.
2877 Only use unaligned stores if it allows fewer stores than aligned. */
2878 unsigned aligned_cnt
2879 = split_group (group
, false, allow_unaligned_load
, NULL
, NULL
, NULL
);
2880 unsigned unaligned_cnt
2881 = split_group (group
, true, allow_unaligned_load
, NULL
, NULL
, NULL
);
2882 if (aligned_cnt
<= unaligned_cnt
)
2883 allow_unaligned_store
= false;
2885 unsigned total_orig
, total_new
;
2886 split_group (group
, allow_unaligned_store
, allow_unaligned_load
,
2887 &split_stores
, &total_orig
, &total_new
);
2889 if (split_stores
.length () >= orig_num_stmts
)
2891 /* We didn't manage to reduce the number of statements. Bail out. */
2892 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2893 fprintf (dump_file
, "Exceeded original number of stmts (%u)."
2894 " Not profitable to emit new sequence.\n",
2898 if (total_orig
<= total_new
)
2900 /* If number of estimated new statements is above estimated original
2901 statements, bail out too. */
2902 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2903 fprintf (dump_file
, "Estimated number of original stmts (%u)"
2904 " not larger than estimated number of new"
2906 total_orig
, total_new
);
2909 gimple_stmt_iterator last_gsi
= gsi_for_stmt (group
->last_stmt
);
2910 gimple_seq seq
= NULL
;
2911 tree last_vdef
, new_vuse
;
2912 last_vdef
= gimple_vdef (group
->last_stmt
);
2913 new_vuse
= gimple_vuse (group
->last_stmt
);
2915 gimple
*stmt
= NULL
;
2916 split_store
*split_store
;
2918 auto_vec
<gimple
*, 32> orig_stmts
;
2919 tree addr
= force_gimple_operand_1 (unshare_expr (base_addr
), &seq
,
2920 is_gimple_mem_ref_addr
, NULL_TREE
);
2922 tree load_addr
[2] = { NULL_TREE
, NULL_TREE
};
2923 gimple_seq load_seq
[2] = { NULL
, NULL
};
2924 gimple_stmt_iterator load_gsi
[2] = { gsi_none (), gsi_none () };
2925 for (int j
= 0; j
< 2; ++j
)
2927 store_operand_info
&op
= group
->stores
[0]->ops
[j
];
2928 if (op
.base_addr
== NULL_TREE
)
2931 store_immediate_info
*infol
= group
->stores
.last ();
2932 if (gimple_vuse (op
.stmt
) == gimple_vuse (infol
->ops
[j
].stmt
))
2934 load_gsi
[j
] = gsi_for_stmt (op
.stmt
);
2936 = force_gimple_operand_1 (unshare_expr (op
.base_addr
),
2937 &load_seq
[j
], is_gimple_mem_ref_addr
,
2940 else if (operand_equal_p (base_addr
, op
.base_addr
, 0))
2941 load_addr
[j
] = addr
;
2944 gimple_seq this_seq
;
2946 = force_gimple_operand_1 (unshare_expr (op
.base_addr
),
2947 &this_seq
, is_gimple_mem_ref_addr
,
2949 gimple_seq_add_seq_without_update (&seq
, this_seq
);
2953 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
2955 unsigned HOST_WIDE_INT try_size
= split_store
->size
;
2956 unsigned HOST_WIDE_INT try_pos
= split_store
->bytepos
;
2957 unsigned HOST_WIDE_INT align
= split_store
->align
;
2960 if (split_store
->orig
)
2962 /* If there is just a single constituent store which covers
2963 the whole area, just reuse the lhs and rhs. */
2964 gimple
*orig_stmt
= split_store
->orig_stores
[0]->stmt
;
2965 dest
= gimple_assign_lhs (orig_stmt
);
2966 src
= gimple_assign_rhs1 (orig_stmt
);
2967 loc
= gimple_location (orig_stmt
);
2971 store_immediate_info
*info
;
2972 unsigned short clique
, base
;
2974 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
2975 orig_stmts
.safe_push (info
->stmt
);
2977 = get_alias_type_for_stmts (orig_stmts
, false, &clique
, &base
);
2978 loc
= get_location_for_stmts (orig_stmts
);
2979 orig_stmts
.truncate (0);
2981 tree int_type
= build_nonstandard_integer_type (try_size
, UNSIGNED
);
2982 int_type
= build_aligned_type (int_type
, align
);
2983 dest
= fold_build2 (MEM_REF
, int_type
, addr
,
2984 build_int_cst (offset_type
, try_pos
));
2985 if (TREE_CODE (dest
) == MEM_REF
)
2987 MR_DEPENDENCE_CLIQUE (dest
) = clique
;
2988 MR_DEPENDENCE_BASE (dest
) = base
;
2992 = native_interpret_expr (int_type
,
2993 group
->mask
+ try_pos
- start_byte_pos
,
2998 j
< 1 + (split_store
->orig_stores
[0]->ops
[1].val
!= NULL_TREE
);
3001 store_operand_info
&op
= split_store
->orig_stores
[0]->ops
[j
];
3004 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
3005 orig_stmts
.safe_push (info
->ops
[j
].stmt
);
3007 offset_type
= get_alias_type_for_stmts (orig_stmts
, true,
3009 location_t load_loc
= get_location_for_stmts (orig_stmts
);
3010 orig_stmts
.truncate (0);
3012 unsigned HOST_WIDE_INT load_align
= group
->load_align
[j
];
3013 unsigned HOST_WIDE_INT align_bitpos
3014 = (try_pos
* BITS_PER_UNIT
3015 - split_store
->orig_stores
[0]->bitpos
3016 + op
.bitpos
) & (load_align
- 1);
3018 load_align
= least_bit_hwi (align_bitpos
);
3021 = build_nonstandard_integer_type (try_size
, UNSIGNED
);
3023 = build_aligned_type (load_int_type
, load_align
);
3025 unsigned HOST_WIDE_INT load_pos
3026 = (try_pos
* BITS_PER_UNIT
3027 - split_store
->orig_stores
[0]->bitpos
3028 + op
.bitpos
) / BITS_PER_UNIT
;
3029 ops
[j
] = fold_build2 (MEM_REF
, load_int_type
, load_addr
[j
],
3030 build_int_cst (offset_type
, load_pos
));
3031 if (TREE_CODE (ops
[j
]) == MEM_REF
)
3033 MR_DEPENDENCE_CLIQUE (ops
[j
]) = clique
;
3034 MR_DEPENDENCE_BASE (ops
[j
]) = base
;
3036 if (!integer_zerop (mask
))
3037 /* The load might load some bits (that will be masked off
3038 later on) uninitialized, avoid -W*uninitialized
3039 warnings in that case. */
3040 TREE_NO_WARNING (ops
[j
]) = 1;
3042 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3044 gimple_set_location (stmt
, load_loc
);
3045 if (gsi_bb (load_gsi
[j
]))
3047 gimple_set_vuse (stmt
, gimple_vuse (op
.stmt
));
3048 gimple_seq_add_stmt_without_update (&load_seq
[j
], stmt
);
3052 gimple_set_vuse (stmt
, new_vuse
);
3053 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3055 ops
[j
] = gimple_assign_lhs (stmt
);
3057 enum tree_code inv_op
3058 = invert_op (split_store
, j
, int_type
, xor_mask
);
3059 if (inv_op
!= NOP_EXPR
)
3061 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3062 inv_op
, ops
[j
], xor_mask
);
3063 gimple_set_location (stmt
, load_loc
);
3064 ops
[j
] = gimple_assign_lhs (stmt
);
3066 if (gsi_bb (load_gsi
[j
]))
3067 gimple_seq_add_stmt_without_update (&load_seq
[j
],
3070 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3074 ops
[j
] = native_interpret_expr (int_type
,
3075 group
->val
+ try_pos
3080 switch (split_store
->orig_stores
[0]->rhs_code
)
3085 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
3087 tree rhs1
= gimple_assign_rhs1 (info
->stmt
);
3088 orig_stmts
.safe_push (SSA_NAME_DEF_STMT (rhs1
));
3091 bit_loc
= get_location_for_stmts (orig_stmts
);
3092 orig_stmts
.truncate (0);
3095 = gimple_build_assign (make_ssa_name (int_type
),
3096 split_store
->orig_stores
[0]->rhs_code
,
3098 gimple_set_location (stmt
, bit_loc
);
3099 /* If there is just one load and there is a separate
3100 load_seq[0], emit the bitwise op right after it. */
3101 if (load_addr
[1] == NULL_TREE
&& gsi_bb (load_gsi
[0]))
3102 gimple_seq_add_stmt_without_update (&load_seq
[0], stmt
);
3103 /* Otherwise, if at least one load is in seq, we need to
3104 emit the bitwise op right before the store. If there
3105 are two loads and are emitted somewhere else, it would
3106 be better to emit the bitwise op as early as possible;
3107 we don't track where that would be possible right now
3110 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3111 src
= gimple_assign_lhs (stmt
);
3113 enum tree_code inv_op
;
3114 inv_op
= invert_op (split_store
, 2, int_type
, xor_mask
);
3115 if (inv_op
!= NOP_EXPR
)
3117 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3118 inv_op
, src
, xor_mask
);
3119 gimple_set_location (stmt
, bit_loc
);
3120 if (load_addr
[1] == NULL_TREE
&& gsi_bb (load_gsi
[0]))
3121 gimple_seq_add_stmt_without_update (&load_seq
[0], stmt
);
3123 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3124 src
= gimple_assign_lhs (stmt
);
3132 if (!integer_zerop (mask
))
3134 tree tem
= make_ssa_name (int_type
);
3135 tree load_src
= unshare_expr (dest
);
3136 /* The load might load some or all bits uninitialized,
3137 avoid -W*uninitialized warnings in that case.
3138 As optimization, it would be nice if all the bits are
3139 provably uninitialized (no stores at all yet or previous
3140 store a CLOBBER) we'd optimize away the load and replace
3142 TREE_NO_WARNING (load_src
) = 1;
3143 stmt
= gimple_build_assign (tem
, load_src
);
3144 gimple_set_location (stmt
, loc
);
3145 gimple_set_vuse (stmt
, new_vuse
);
3146 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3148 /* FIXME: If there is a single chunk of zero bits in mask,
3149 perhaps use BIT_INSERT_EXPR instead? */
3150 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3151 BIT_AND_EXPR
, tem
, mask
);
3152 gimple_set_location (stmt
, loc
);
3153 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3154 tem
= gimple_assign_lhs (stmt
);
3156 if (TREE_CODE (src
) == INTEGER_CST
)
3157 src
= wide_int_to_tree (int_type
,
3158 wi::bit_and_not (wi::to_wide (src
),
3159 wi::to_wide (mask
)));
3163 = wide_int_to_tree (int_type
,
3164 wi::bit_not (wi::to_wide (mask
)));
3165 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3166 BIT_AND_EXPR
, src
, nmask
);
3167 gimple_set_location (stmt
, loc
);
3168 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3169 src
= gimple_assign_lhs (stmt
);
3171 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3172 BIT_IOR_EXPR
, tem
, src
);
3173 gimple_set_location (stmt
, loc
);
3174 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3175 src
= gimple_assign_lhs (stmt
);
3179 stmt
= gimple_build_assign (dest
, src
);
3180 gimple_set_location (stmt
, loc
);
3181 gimple_set_vuse (stmt
, new_vuse
);
3182 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3185 if (i
< split_stores
.length () - 1)
3186 new_vdef
= make_ssa_name (gimple_vop (cfun
), stmt
);
3188 new_vdef
= last_vdef
;
3190 gimple_set_vdef (stmt
, new_vdef
);
3191 SSA_NAME_DEF_STMT (new_vdef
) = stmt
;
3192 new_vuse
= new_vdef
;
3195 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3202 "New sequence of %u stmts to replace old one of %u stmts\n",
3203 split_stores
.length (), orig_num_stmts
);
3204 if (dump_flags
& TDF_DETAILS
)
3205 print_gimple_seq (dump_file
, seq
, 0, TDF_VOPS
| TDF_MEMSYMS
);
3207 gsi_insert_seq_after (&last_gsi
, seq
, GSI_SAME_STMT
);
3208 for (int j
= 0; j
< 2; ++j
)
3210 gsi_insert_seq_after (&load_gsi
[j
], load_seq
[j
], GSI_SAME_STMT
);
/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

bool
imm_store_chain_info::output_merged_stores ()
{
  unsigned int i;
  merged_store_group *merged_store;
  bool ret = false;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
    {
      if (output_merged_store (merged_store))
	{
	  unsigned int j;
	  store_immediate_info *store;
	  FOR_EACH_VEC_ELT (merged_store->stores, j, store)
	    {
	      gimple *stmt = store->stmt;
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      gsi_remove (&gsi, true);
	      if (stmt != merged_store->last_stmt)
		{
		  unlink_stmt_vdef (stmt);
		  release_defs (stmt);
		}
	    }
	  ret = true;
	}
    }
  if (ret && dump_file)
    fprintf (dump_file, "Merging successful!\n");

  return ret;
}
/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

bool
imm_store_chain_info::terminate_and_process_chain ()
{
  /* Process store chain.  */
  bool ret = false;
  if (m_store_info.length () > 1)
    {
      ret = coalesce_immediate_stores ();
      if (ret)
	ret = output_merged_stores ();
    }

  /* Delete all the entries we allocated ourselves.  */
  store_immediate_info *info;
  unsigned int i;
  FOR_EACH_VEC_ELT (m_store_info, i, info)
    delete info;

  merged_store_group *merged_info;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
    delete merged_info;

  return ret;
}
/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

static bool
lhs_valid_for_store_merging_p (tree lhs)
{
  tree_code code = TREE_CODE (lhs);

  if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
      || code == COMPONENT_REF || code == BIT_FIELD_REF)
    return true;

  return false;
}

/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

static bool
rhs_valid_for_store_merging_p (tree rhs)
{
  return native_encode_expr (rhs, NULL,
			     GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0;
}
3309 /* If MEM is a memory reference usable for store merging (either as
3310 store destination or for loads), return the non-NULL base_addr
3311 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
3312 Otherwise return NULL, *PBITPOS should be still valid even for that
3316 mem_valid_for_store_merging (tree mem
, unsigned HOST_WIDE_INT
*pbitsize
,
3317 unsigned HOST_WIDE_INT
*pbitpos
,
3318 unsigned HOST_WIDE_INT
*pbitregion_start
,
3319 unsigned HOST_WIDE_INT
*pbitregion_end
)
3321 HOST_WIDE_INT bitsize
;
3322 HOST_WIDE_INT bitpos
;
3323 unsigned HOST_WIDE_INT bitregion_start
= 0;
3324 unsigned HOST_WIDE_INT bitregion_end
= 0;
3326 int unsignedp
= 0, reversep
= 0, volatilep
= 0;
3328 tree base_addr
= get_inner_reference (mem
, &bitsize
, &bitpos
, &offset
, &mode
,
3329 &unsignedp
, &reversep
, &volatilep
);
3330 *pbitsize
= bitsize
;
3334 if (TREE_CODE (mem
) == COMPONENT_REF
3335 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem
, 1)))
3337 get_bit_range (&bitregion_start
, &bitregion_end
, mem
, &bitpos
, &offset
);
3345 /* We do not want to rewrite TARGET_MEM_REFs. */
3346 if (TREE_CODE (base_addr
) == TARGET_MEM_REF
)
3348 /* In some cases get_inner_reference may return a
3349 MEM_REF [ptr + byteoffset]. For the purposes of this pass
3350 canonicalize the base_addr to MEM_REF [ptr] and take
3351 byteoffset into account in the bitpos. This occurs in
3352 PR 23684 and this way we can catch more chains. */
3353 else if (TREE_CODE (base_addr
) == MEM_REF
)
3355 offset_int bit_off
, byte_off
= mem_ref_offset (base_addr
);
3356 bit_off
= byte_off
<< LOG2_BITS_PER_UNIT
;
3358 if (!wi::neg_p (bit_off
) && wi::fits_shwi_p (bit_off
))
3360 bitpos
= bit_off
.to_shwi ();
3363 bit_off
= byte_off
<< LOG2_BITS_PER_UNIT
;
3364 bit_off
+= bitregion_start
;
3365 if (wi::fits_uhwi_p (bit_off
))
3367 bitregion_start
= bit_off
.to_uhwi ();
3368 bit_off
= byte_off
<< LOG2_BITS_PER_UNIT
;
3369 bit_off
+= bitregion_end
;
3370 if (wi::fits_uhwi_p (bit_off
))
3371 bitregion_end
= bit_off
.to_uhwi ();
3381 base_addr
= TREE_OPERAND (base_addr
, 0);
3383 /* get_inner_reference returns the base object, get at its
3389 base_addr
= build_fold_addr_expr (base_addr
);
3394 bitregion_start
= ROUND_DOWN (bitpos
, BITS_PER_UNIT
);
3395 bitregion_end
= ROUND_UP (bitpos
+ bitsize
, BITS_PER_UNIT
);
3398 if (offset
!= NULL_TREE
)
3400 /* If the access is variable offset then a base decl has to be
3401 address-taken to be able to emit pointer-based stores to it.
3402 ??? We might be able to get away with re-using the original
3403 base up to the first variable part and then wrapping that inside
3405 tree base
= get_base_address (base_addr
);
3407 || (DECL_P (base
) && ! TREE_ADDRESSABLE (base
)))
3410 base_addr
= build2 (POINTER_PLUS_EXPR
, TREE_TYPE (base_addr
),
3414 *pbitsize
= bitsize
;
3416 *pbitregion_start
= bitregion_start
;
3417 *pbitregion_end
= bitregion_end
;
/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */

static bool
handled_load (gimple *stmt, store_operand_info *op,
	      unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos,
	      unsigned HOST_WIDE_INT bitregion_start,
	      unsigned HOST_WIDE_INT bitregion_end)
{
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
			   bitregion_start, bitregion_end))
	{
	  /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
	     been optimized earlier, but if allowed here, would confuse the
	     multiple uses counting.  */
	  if (op->bit_not_p)
	    return false;
	  op->bit_not_p = !op->bit_not_p;
	  return true;
	}
      return false;
    }
  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
	= mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
				       &op->bitregion_start,
				       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
	  && op->bitsize == bitsize
	  && ((op->bitpos - bitpos) % BITS_PER_UNIT) == 0
	  && op->bitpos - op->bitregion_start >= bitpos - bitregion_start
	  && op->bitregion_end - op->bitpos >= bitregion_end - bitpos)
	{
	  op->stmt = stmt;
	  op->val = mem;
	  op->bit_not_p = false;
	  return true;
	}
    }
  return false;
}
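
/* Illustrative sketch (hypothetical statements, not part of the pass): for

     _1 = q->a;
     _2 = ~_1;
     p->a = _2;

   handled_load records the load from q->a as the store's operand and sets
   bit_not_p on it, so the inversion can later be applied to the widened
   value instead of being kept per original store.  */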
3475 /* Record the store STMT for store merging optimization if it can be
3479 pass_store_merging::process_store (gimple
*stmt
)
3481 tree lhs
= gimple_assign_lhs (stmt
);
3482 tree rhs
= gimple_assign_rhs1 (stmt
);
3483 unsigned HOST_WIDE_INT bitsize
, bitpos
;
3484 unsigned HOST_WIDE_INT bitregion_start
;
3485 unsigned HOST_WIDE_INT bitregion_end
;
3487 = mem_valid_for_store_merging (lhs
, &bitsize
, &bitpos
,
3488 &bitregion_start
, &bitregion_end
);
3492 bool invalid
= (base_addr
== NULL_TREE
3493 || ((bitsize
> MAX_BITSIZE_MODE_ANY_INT
)
3494 && (TREE_CODE (rhs
) != INTEGER_CST
)));
3495 enum tree_code rhs_code
= ERROR_MARK
;
3496 bool bit_not_p
= false;
3497 store_operand_info ops
[2];
3500 else if (rhs_valid_for_store_merging_p (rhs
))
3502 rhs_code
= INTEGER_CST
;
3505 else if (TREE_CODE (rhs
) != SSA_NAME
)
3509 gimple
*def_stmt
= SSA_NAME_DEF_STMT (rhs
), *def_stmt1
, *def_stmt2
;
3510 if (!is_gimple_assign (def_stmt
))
3512 else if (handled_load (def_stmt
, &ops
[0], bitsize
, bitpos
,
3513 bitregion_start
, bitregion_end
))
3515 else if (gimple_assign_rhs_code (def_stmt
) == BIT_NOT_EXPR
)
3517 tree rhs1
= gimple_assign_rhs1 (def_stmt
);
3518 if (TREE_CODE (rhs1
) == SSA_NAME
3519 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1
)))
3522 def_stmt
= SSA_NAME_DEF_STMT (rhs1
);
3525 if (rhs_code
== ERROR_MARK
&& !invalid
)
3526 switch ((rhs_code
= gimple_assign_rhs_code (def_stmt
)))
3532 rhs1
= gimple_assign_rhs1 (def_stmt
);
3533 rhs2
= gimple_assign_rhs2 (def_stmt
);
3535 if (TREE_CODE (rhs1
) != SSA_NAME
)
3537 def_stmt1
= SSA_NAME_DEF_STMT (rhs1
);
3538 if (!is_gimple_assign (def_stmt1
)
3539 || !handled_load (def_stmt1
, &ops
[0], bitsize
, bitpos
,
3540 bitregion_start
, bitregion_end
))
3542 if (rhs_valid_for_store_merging_p (rhs2
))
3544 else if (TREE_CODE (rhs2
) != SSA_NAME
)
3548 def_stmt2
= SSA_NAME_DEF_STMT (rhs2
);
3549 if (!is_gimple_assign (def_stmt2
))
3551 else if (!handled_load (def_stmt2
, &ops
[1], bitsize
, bitpos
,
3552 bitregion_start
, bitregion_end
))
3565 terminate_all_aliasing_chains (NULL
, stmt
);
3569 struct imm_store_chain_info
**chain_info
= NULL
;
3571 chain_info
= m_stores
.get (base_addr
);
3573 store_immediate_info
*info
;
3576 unsigned int ord
= (*chain_info
)->m_store_info
.length ();
3577 info
= new store_immediate_info (bitsize
, bitpos
, bitregion_start
,
3578 bitregion_end
, stmt
, ord
, rhs_code
,
3579 bit_not_p
, ops
[0], ops
[1]);
3580 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3582 fprintf (dump_file
, "Recording immediate store from stmt:\n");
3583 print_gimple_stmt (dump_file
, stmt
, 0);
3585 (*chain_info
)->m_store_info
.safe_push (info
);
3586 terminate_all_aliasing_chains (chain_info
, stmt
);
3587 /* If we reach the limit of stores to merge in a chain terminate and
3588 process the chain now. */
3589 if ((*chain_info
)->m_store_info
.length ()
3590 == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE
))
3592 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3594 "Reached maximum number of statements to merge:\n");
3595 terminate_and_release_chain (*chain_info
);
3600 /* Store aliases any existing chain? */
3601 terminate_all_aliasing_chains (NULL
, stmt
);
3602 /* Start a new chain. */
3603 struct imm_store_chain_info
*new_chain
3604 = new imm_store_chain_info (m_stores_head
, base_addr
);
3605 info
= new store_immediate_info (bitsize
, bitpos
, bitregion_start
,
3606 bitregion_end
, stmt
, 0, rhs_code
,
3607 bit_not_p
, ops
[0], ops
[1]);
3608 new_chain
->m_store_info
.safe_push (info
);
3609 m_stores
.put (base_addr
, new_chain
);
3610 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3612 fprintf (dump_file
, "Starting new chain with statement:\n");
3613 print_gimple_stmt (dump_file
, stmt
, 0);
3614 fprintf (dump_file
, "The base object is:\n");
3615 print_generic_expr (dump_file
, base_addr
);
3616 fprintf (dump_file
, "\n");
/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the
   widened variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      unsigned HOST_WIDE_INT num_statements = 0;
      /* Record the original statements so that we can keep track of
	 statements emitted in this pass and not re-process new
	 statements.  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  if (is_gimple_debug (gsi_stmt (gsi)))
	    continue;

	  if (++num_statements >= 2)
	    break;
	}

      if (num_statements < 2)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_debug (stmt))
	    continue;

	  if (gimple_has_volatile_ops (stmt))
	    {
	      /* Terminate all chains.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Volatile access terminates "
				    "all chains\n");
	      terminate_and_process_all_chains ();
	      continue;
	    }

	  if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
	      && !stmt_can_throw_internal (stmt)
	      && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
	    process_store (stmt);
	  else
	    terminate_all_aliasing_chains (NULL, stmt);
	}
      terminate_and_process_all_chains ();
    }
  return 0;
}
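
/* Illustrative sketch (hypothetical source, not part of the pass): within a
   basic block such as

     p->a = 1;
     p->b = 2;
     *(volatile int *) q = 0;   // volatile access terminates all chains
     p->c = 3;
     p->d = 4;

   the first two stores are coalesced and emitted before the volatile
   statement is reached, and the last two start a fresh chain afterwards.  */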
/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}

#if CHECKING_P

namespace selftest {
/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
	{
	  fprintf (stderr, "Arrays do not match.  X:\n");
	  dump_char_array (stderr, x, n);
	  fprintf (stderr, "Y:\n");
	  dump_char_array (stderr, y, n);
	}
      ASSERT_EQ (x[i], y[i]);
    }
}
/* Test shift_bytes_in_array and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array (void)
{
   /* byte 1   | byte 0
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test shift_bytes_in_array_right and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
   /* byte 1   | byte 0
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0 };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8 };
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test clear_bit_region that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}
/* Test clear_bit_region_be that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}
/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}

} // namespace selftest
#endif /* CHECKING_P.  */