/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap can be transformed into a single 4-byte
   load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:25] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in the store_operand_info struct.
   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and
   the following one storing a value loaded from memory, or if the loaded
   memory objects are not adjacent, a new merged_store_group is created as
   well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   This phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].
   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to them.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.
   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:

   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little-endian we emit:
   [p]      (32-bit) := 0x56781234; // val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     // val & 0x00000000ffff;  */
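
/* For illustration only: on a little-endian target with fast unaligned
   stores, a source sequence like the following (the function name is
   made up for the example)

     void set_magic (unsigned char *p)
     {
       p[0] = 0x12;
       p[1] = 0x34;
       p[2] = 0x56;
       p[3] = 0x78;
     }

   is the kind of input the store merging pass rewrites into one 32-bit
   store of 0x78563412, i.e. the four immediates concatenated according
   to endianness.  */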

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "timevar.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-cfg.h"
#include "except.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "dbgcnt.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (eg. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - number of expressions N_OPS bitwise ored together to represent
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
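
/* For illustration: a plain 4-byte little-endian load has the symbolic
   number 0x04030201 (source byte 1 in the least significant marker),
   which is exactly CMPNOP masked down to 4 markers, i.e. the identity.
   A manual byte swap of that load computes 0x01020304, which is CMPXCHG
   shifted down to 4 markers (see find_bswap_or_nop_finalize):

     CMPNOP & 0xffffffff == 0x04030201    identity (nop)
     CMPXCHG >> 32       == 0x01020304    byte swap  */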

/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count < 0
      || count >= TYPE_PRECISION (n->type)
      || count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
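
/* For illustration: on a 4-byte unsigned type, shifting the identity
   number 0x04030201 left by 8 bits gives 0x03020100 after masking, i.e.
   the least significant byte is now known to be zero.  For an arithmetic
   right shift of a signed type whose head marker is set, the vacated head
   bytes are instead marked MARKER_BYTE_UNKNOWN (0xff), since they may
   hold copies of the sign bit.  */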

/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE
      && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}

/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory
   area accessed and the offset of the access from that base are recorded
   in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf nodes.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  poly_offset_int boff = mem_ref_offset (base_addr);
	  boff <<= LOG2_BITS_PER_UNIT;
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
	{
	  tree byte_offset = wide_int_to_tree
	    (sizetype, bits_to_bytes_round_down (bit_offset));
	  bit_offset = num_trailing_bits (bit_offset);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset, byte_offset);
	  else
	    offset = byte_offset;
	}

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}

/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
			gimple *source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations with
     the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
	return NULL;

      if (start1 < start2)
	{
	  n_start = n1;
	  start_sub = start2 - start1;
	}
      else
	{
	  n_start = n2;
	  start_sub = start1 - start2;
	}

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
	source_stmt = source_stmt1;
      else
	source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
	return NULL;
    }
  n->n = n1->n | n2->n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}

/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
	  || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
	return NULL;

      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
	  && bitsize % BITS_PER_UNIT == 0
	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
	{
	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
	  if (BYTES_BIG_ENDIAN)
	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

	  /* Shift.  */
	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
	    return NULL;

	  /* Mask.  */
	  uint64_t mask = 0;
	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
	       i++, tmp <<= BITS_PER_UNIT)
	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
	  n->n &= mask;

	  /* Convert.  */
	  n->type = TREE_TYPE (rhs1);
	  if (!n->base_addr)
	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

	  return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
	}

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (n1.vuse != n2.vuse)
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}

/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
			    uint64_t *cmpnop)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg &= mask;
	  *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
	}
      else
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
	  *cmpnop &= mask;
	}
      n->range = rsize;
    }

  n->range *= BITS_PER_UNIT;
}
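
/* For illustration: on little-endian, if an 8-byte load only feeds the low
   two bytes of the result (symbolic number 0x0201, range 8, rsize 2), this
   shifts *CMPXCHG down to 0x0102, masks *CMPNOP down to 0x0201 and shrinks
   the range to 2 bytes (16 bits), so the expression is recognized as a
   16-bit load in target endianness.  */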

/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  tree type_size = TYPE_SIZE_UNIT (gimple_expr_type (stmt));
  if (!tree_fits_uhwi_p (type_size))
    return NULL;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by 2 * (log2(n) + 1) here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = tree_to_uhwi (type_size);
  limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    {
      if (gimple_assign_rhs_code (stmt) != CONSTRUCTOR
	  || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return NULL;
      unsigned HOST_WIDE_INT sz = tree_to_uhwi (type_size) * BITS_PER_UNIT;
      if (sz != 16 && sz != 32 && sz != 64)
	return NULL;
      tree rhs = gimple_assign_rhs1 (stmt);
      if (CONSTRUCTOR_NELTS (rhs) == 0)
	return NULL;
      tree eltype = TREE_TYPE (TREE_TYPE (rhs));
      unsigned HOST_WIDE_INT eltsz
	= int_size_in_bytes (eltype) * BITS_PER_UNIT;
      if (TYPE_PRECISION (eltype) != eltsz)
	return NULL;
      constructor_elt *elt;
      unsigned int i;
      tree type = build_nonstandard_integer_type (sz, 1);
      FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
	{
	  if (TREE_CODE (elt->value) != SSA_NAME
	      || !INTEGRAL_TYPE_P (TREE_TYPE (elt->value)))
	    return NULL;
	  struct symbolic_number n1;
	  gimple *source_stmt
	    = find_bswap_or_nop_1 (SSA_NAME_DEF_STMT (elt->value), &n1,
				   limit - 1);

	  if (!source_stmt)
	    return NULL;

	  n1.type = type;
	  if (!n1.base_addr)
	    n1.range = sz / BITS_PER_UNIT;

	  if (i == 0)
	    {
	      ins_stmt = source_stmt;
	      *n = n1;
	    }
	  else
	    {
	      if (n->vuse != n1.vuse)
		return NULL;

	      struct symbolic_number n0 = *n;

	      if (!BYTES_BIG_ENDIAN)
		{
		  if (!do_shift_rotate (LSHIFT_EXPR, &n1, i * eltsz))
		    return NULL;
		}
	      else if (!do_shift_rotate (LSHIFT_EXPR, &n0, eltsz))
		return NULL;
	      ins_stmt
		= perform_symbolic_merge (ins_stmt, &n0, source_stmt, &n1, n);

	      if (!ins_stmt)
		return NULL;
	    }
	}
    }

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}
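
/* For illustration, a hand-written 32-bit byte swap that this function
   recognizes (a sketch; x is a uint32_t):

     (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24)

   Each OR leg contributes the markers of one source byte; the merged
   symbolic number is 0x01020304, which matches CMPXCHG for 4 bytes, so
   *BSWAP is set to true.  */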

const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap

/* Helper function for bswap_replace.  Build VIEW_CONVERT_EXPR from
   VAL to TYPE.  If VAL has a different type size, emit a NOP_EXPR cast
   first.  */

static tree
bswap_view_convert (gimple_stmt_iterator *gsi, tree type, tree val)
{
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (val)));
  if (TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (val)))
    {
      HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_SIZE (type));
      tree itype = build_nonstandard_integer_type (prec, 1);
      gimple *g = gimple_build_assign (make_ssa_name (itype), NOP_EXPR, val);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      val = gimple_assign_lhs (g);
    }
  return build1 (VIEW_CONVERT_EXPR, type, val);
}

/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias set of the
   load to perform are also given in N while the builtin bswap to invoke is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
   the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE, which can lead to gsi_stmt (GSI) changing basic block.  */

tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
	       tree bswap_type, tree load_type, struct symbolic_number *n,
	       bool bswap)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt;
  tree_code conv_code = NOP_EXPR;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    {
      tgt = gimple_assign_lhs (cur_stmt);
      if (gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR
	  && tgt
	  && VECTOR_TYPE_P (TREE_TYPE (tgt)))
	conv_code = VIEW_CONVERT_EXPR;
    }

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
	gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
	{
	  basic_block ins_bb = gimple_bb (ins_stmt);
	  basic_block cur_bb = gimple_bb (cur_stmt);
	  if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
	    return NULL_TREE;

	  /* Move cur_stmt just before one of the load of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
	  if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
	    reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
	  gsi_move_before (&gsi, &gsi_ins);
	  gsi = gsi_for_stmt (cur_stmt);
	}
      else
	gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = unshare_expr (addr_expr);
      else
	{
	  addr_tmp = unshare_expr (n->base_addr);
	  if (!is_gimple_mem_ref_addr (addr_tmp))
	    addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
						   is_gimple_mem_ref_addr,
						   NULL_TREE, true,
						   GSI_SAME_STMT);
	  load_offset = n->bytepos;
	  if (n->offset)
	    {
	      tree off
		= force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
					    true, NULL_TREE, true,
					    GSI_SAME_STMT);
	      gimple *stmt
		= gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
				       POINTER_PLUS_EXPR, addr_tmp, off);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      addr_tmp = gimple_assign_lhs (stmt);
	    }
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      if (conv_code == VIEW_CONVERT_EXPR)
		val_tmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), val_tmp);
	      gimple_assign_set_rhs_with_ops (&gsi, conv_code, val_tmp);
	      update_stmt (cur_stmt);
	    }
	  else if (cur_stmt)
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      update_stmt (cur_stmt);
	    }
	  else
	    {
	      tgt = make_ssa_name (load_type);
	      cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
	    }

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0);
	    }
	  return tgt;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	  src = val_tmp;
	}
    }
  else if (!bswap)
    {
      gimple *g = NULL;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
	{
	  if (!is_gimple_val (src))
	    return NULL_TREE;
	  if (conv_code == VIEW_CONVERT_EXPR)
	    src = bswap_view_convert (&gsi, TREE_TYPE (tgt), src);
	  g = gimple_build_assign (tgt, conv_code, src);
	}
      else if (cur_stmt)
	g = gimple_build_assign (tgt, src);
      else
	tgt = src;

      if (n->range == 16)
	nop_stats.found_16bit++;
      else if (n->range == 32)
	nop_stats.found_32bit++;
      else
	{
	  gcc_assert (n->range == 64);
	  nop_stats.found_64bit++;
	}
      if (dump_file)
	{
	  fprintf (dump_file,
		   "%d bit reshuffle in target endianness found at: ",
		   (int) n->range);
	  if (cur_stmt)
	    print_gimple_stmt (dump_file, cur_stmt, 0);
	  else
	    {
	      print_generic_expr (dump_file, tgt, TDF_NONE);
	      fprintf (dump_file, "\n");
	    }
	}
      if (cur_stmt)
	gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16bit values
     are considered as rotation of 2N bit values by N bits is generally not
     equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
     gives 0x03040102 while a bswap for that value is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      tree atmp = tmp;
      if (conv_code == VIEW_CONVERT_EXPR)
	atmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), tmp);
      convert_stmt = gimple_build_assign (tgt, conv_code, atmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      if (cur_stmt)
	print_gimple_stmt (dump_file, cur_stmt, 0);
      else
	{
	  print_generic_expr (dump_file, tgt, TDF_NONE);
	  fprintf (dump_file, "\n");
	}
    }

  if (cur_stmt)
    {
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
  return tgt;
}

/* Try to optimize an assignment CUR_STMT with CONSTRUCTOR on the rhs
   using bswap optimizations.  CDI_DOMINATORS need to be
   computed on entry.  Return true if it has been optimized and
   TODO_update_ssa is needed.  */

static bool
maybe_optimize_vector_constructor (gimple *cur_stmt)
{
  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
  struct symbolic_number n;
  bool bswap;

  gcc_assert (is_gimple_assign (cur_stmt)
	      && gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR);

  tree rhs = gimple_assign_rhs1 (cur_stmt);
  if (!VECTOR_TYPE_P (TREE_TYPE (rhs))
      || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
      || gimple_assign_lhs (cur_stmt) == NULL_TREE)
    return false;

  HOST_WIDE_INT sz = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
  switch (sz)
    {
    case 16:
      load_type = bswap_type = uint16_type_node;
      break;
    case 32:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
	{
	  load_type = uint32_type_node;
	  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
	  bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	}
      else
	return false;
      break;
    case 64:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	  && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
	      || (word_mode == SImode
		  && builtin_decl_explicit_p (BUILT_IN_BSWAP32)
		  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)))
	{
	  load_type = uint64_type_node;
	  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
	  bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	}
      else
	return false;
      break;
    default:
      return false;
    }

  gimple *ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
  if (!ins_stmt || n.range != (unsigned HOST_WIDE_INT) sz)
    return false;

  if (bswap && !fndecl && n.range != 16)
    return false;

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  return bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			bswap_type, load_type, &n, bswap) != NULL_TREE;
}

/* Find manual byte swap implementations as well as load in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle previously
	 inserted smaller bswap replacements as sub-patterns, the wider
	 variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  enum tree_code code;
	  struct symbolic_number n;
	  bool bswap;

	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Moving the gsi_prev
	     there makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
	  gsi_prev (&gsi);

	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
	  switch (code)
	    {
	    case LROTATE_EXPR:
	    case RROTATE_EXPR:
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
		continue;
	      /* Fall through.  */
	    case BIT_IOR_EXPR:
	      break;
	    case CONSTRUCTOR:
	      {
		tree rhs = gimple_assign_rhs1 (cur_stmt);
		if (VECTOR_TYPE_P (TREE_TYPE (rhs))
		    && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs))))
		  break;
	      }
	      continue;
	    default:
	      continue;
	    }

	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!ins_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      /* Already in canonical form, nothing to do.  */
	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
		continue;
	      load_type = bswap_type = uint16_type_node;
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl && n.range != 16)
	    continue;

	  if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			     bswap_type, load_type, &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}

/* Struct recording one operand for the store, which is either a constant,
   then VAL represents the constant and all the other fields are zero, or
   a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
   and the other fields also reflect the memory load, or an SSA name, then
   VAL represents the SSA name and all the other fields are zero.  */
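
/* For illustration: for a store like *p = q[0] | 0x10, ops[0] would record
   the memory load q[0] (VAL is the reference, BASE_ADDR non-NULL) and
   ops[1] the constant 0x10 (only VAL set); for a plain *p = 5 only ops[0]
   is used and holds the constant.  */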

class store_operand_info
{
public:
  tree val;
  tree base_addr;
  poly_uint64 bitsize;
  poly_uint64 bitpos;
  poly_uint64 bitregion_start;
  poly_uint64 bitregion_end;
  gimple *stmt;
  bool bit_not_p;
  store_operand_info ();
};

store_operand_info::store_operand_info ()
  : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
    bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
{
}

/* Struct recording the information about a single store of an immediate
   to memory.  These are created in the first phase and coalesced into
   merged_store_group objects in the second phase.  */

class store_immediate_info
{
public:
  unsigned HOST_WIDE_INT bitsize;
  unsigned HOST_WIDE_INT bitpos;
  unsigned HOST_WIDE_INT bitregion_start;
  /* This is one past the last bit of the bit region.  */
  unsigned HOST_WIDE_INT bitregion_end;
  gimple *stmt;
  unsigned int order;
  /* INTEGER_CST for constant store, STRING_CST for string store,
     MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
     BIT_INSERT_EXPR for bit insertion.
     LROTATE_EXPR if it can be only bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
  enum tree_code rhs_code;
  /* Two fields for bswap optimization purposes.  */
  struct symbolic_number n;
  gimple *ins_stmt;
  /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing.  */
  bool bit_not_p;
  /* True if ops have been swapped and thus ops[1] represents
     rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2.  */
  bool ops_swapped_p;
  /* The index number of the landing pad, or 0 if there is none.  */
  int lp_nr;
  /* Operands.  For BIT_*_EXPR rhs_code both operands are used, otherwise
     just the first one.  */
  store_operand_info ops[2];
  store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			gimple *, unsigned int, enum tree_code,
			struct symbolic_number &, gimple *, bool, int,
			const store_operand_info &,
			const store_operand_info &);
};

store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
					    unsigned HOST_WIDE_INT bp,
					    unsigned HOST_WIDE_INT brs,
					    unsigned HOST_WIDE_INT bre,
					    gimple *st,
					    unsigned int ord,
					    enum tree_code rhscode,
					    struct symbolic_number &nr,
					    gimple *ins_stmtp,
					    bool bitnotp,
					    int nr2,
					    const store_operand_info &op0r,
					    const store_operand_info &op1r)
  : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
    stmt (st), order (ord), rhs_code (rhscode), n (nr),
    ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
    lp_nr (nr2)
#if __cplusplus >= 201103L
    , ops { op0r, op1r }
{
}
#else
{
  ops[0] = op0r;
  ops[1] = op1r;
}
#endif

/* Struct representing a group of stores to contiguous memory locations.
   These are produced by the second phase (coalescing) and consumed in the
   third phase that outputs the widened stores.  */

class merged_store_group
{
public:
  unsigned HOST_WIDE_INT start;
  unsigned HOST_WIDE_INT width;
  unsigned HOST_WIDE_INT bitregion_start;
  unsigned HOST_WIDE_INT bitregion_end;
  /* The size of the allocated memory for val and mask.  */
  unsigned HOST_WIDE_INT buf_size;
  unsigned HOST_WIDE_INT align_base;
  poly_uint64 load_align_base[2];

  unsigned int align;
  unsigned int load_align[2];
  unsigned int first_order;
  unsigned int last_order;
  bool bit_insertion;
  bool string_concatenation;
  bool only_constants;
  bool consecutive;
  unsigned int first_nonmergeable_order;
  int lp_nr;

  auto_vec<store_immediate_info *> stores;
  /* We record the first and last original statements in the sequence because
     we'll need their vuse/vdef and replacement position.  It's easier to keep
     track of them separately as 'stores' is reordered by apply_stores.  */
  gimple *last_stmt;
  gimple *first_stmt;
  unsigned char *val;
  unsigned char *mask;

  merged_store_group (store_immediate_info *);
  ~merged_store_group ();
  bool can_be_merged_into (store_immediate_info *);
  void merge_into (store_immediate_info *);
  void merge_overlapping (store_immediate_info *);
  bool apply_stores ();
private:
  void do_merge (store_immediate_info *);
};

/* Debug helper.  Dump LEN elements of byte array PTR to FD in hex.  */

static void
dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
{
  if (!fd)
    return;

  for (unsigned int i = 0; i < len; i++)
    fprintf (fd, "%02x ", ptr[i]);
  fprintf (fd, "\n");
}

/* Clear out LEN bits starting from bit START in the byte array
   PTR.  This clears the bits to the *right* from START.
   START must be within [0, BITS_PER_UNIT) and counts starting from
   the least significant bit.  */

static void
clear_bit_region_be (unsigned char *ptr, unsigned int start,
		     unsigned int len)
{
  if (len == 0)
    return;
  /* Clear len bits to the right of start.  */
  else if (len <= start + 1)
    {
      unsigned char mask = (~(~0U << len));
      mask = mask << (start + 1U - len);

      ptr[0] &= ~mask;
    }
  else if (start != BITS_PER_UNIT - 1)
    {
      clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
      clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
			   len - (start % BITS_PER_UNIT) - 1);
    }
  else if (start == BITS_PER_UNIT - 1
	   && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      memset (ptr, 0, nbytes);
      if (len % BITS_PER_UNIT != 0)
	clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
			     len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}

/* In the byte array PTR clear the bit region that starts at bit
   START and is LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */

static void
clear_bit_region (unsigned char *ptr, unsigned int start,
		  unsigned int len)
{
  /* Degenerate base case.  */
  if (len == 0)
    return;
  else if (start >= BITS_PER_UNIT)
    clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
  /* Second base case.  */
  else if ((start + len) <= BITS_PER_UNIT)
    {
      unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
      mask >>= BITS_PER_UNIT - (start + len);

      ptr[0] &= ~mask;

      return;
    }
  /* Clear most significant bits in a byte and proceed with the next byte.  */
  else if (start != 0)
    {
      clear_bit_region (ptr, start, BITS_PER_UNIT - start);
      clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
    }
  /* Whole bytes need to be cleared.  */
  else if (start == 0 && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      /* We could recurse on each byte but we clear whole bytes, so a simple
	 memset will do.  */
      memset (ptr, '\0', nbytes);
      /* Clear the remaining sub-byte region if there is one.  */
      if (len % BITS_PER_UNIT != 0)
	clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}
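
/* For illustration: clear_bit_region (ptr, 6, 4) clears bits 7 and 6 of
   ptr[0] and then recurses to clear bits 1 and 0 of ptr[1]; bit positions
   count here from the least significant bit of each byte upwards.  */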

/* Write BITLEN bits of EXPR to the byte array PTR at
   bit position BITPOS.  PTR should contain TOTAL_BYTES elements.
   Return true if the operation succeeded.  */

static bool
encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
		       unsigned int total_bytes)
{
  unsigned int first_byte = bitpos / BITS_PER_UNIT;
  bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
			|| (bitpos % BITS_PER_UNIT)
			|| !int_mode_for_size (bitlen, 0).exists ());
  bool empty_ctor_p
    = (TREE_CODE (expr) == CONSTRUCTOR
       && CONSTRUCTOR_NELTS (expr) == 0
       && TYPE_SIZE_UNIT (TREE_TYPE (expr))
       && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));

  if (!sub_byte_op_p)
    {
      if (first_byte >= total_bytes)
	return false;
      total_bytes -= first_byte;
      if (empty_ctor_p)
	{
	  unsigned HOST_WIDE_INT rhs_bytes
	    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
	  if (rhs_bytes > total_bytes)
	    return false;
	  memset (ptr + first_byte, '\0', rhs_bytes);
	  return true;
	}
      return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
    }

  /* LITTLE-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
     |--------|--------|--------| ptr + first_byte
           ^              ^
           xxx xxxxxxxx xxx< bp>
           |______EXPR____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer by 'bp' (carrying the bits over as necessary).
     |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
                                          <------bitlen---->< bp>
     Then we clear the destination bits:
     |---00000|00000000|000-----| ptr + first_byte
         <-------bitlen--->< bp>

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
     |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.

     BIG-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
     ptr + first_byte |--------|--------|--------|
                            ^              ^
                       <bp >xxx xxxxxxxx xxx
                            |_____EXPR_____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer to the right by (carrying the bits over as necessary).
     We shift by as much as needed to align the most significant bit of EXPR
     with bitpos:
     |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
        <---bitlen---->          <bp ><-----bitlen----->
     Then we clear the destination bits:
     ptr + first_byte |-----000||00000000||00000---|
                       <bp ><-------bitlen----->

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
     ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
     The awkwardness comes from the fact that bitpos is counted from the
     most significant bit of a byte.  */

  /* We must be dealing with fixed-size data at this point, since the
     total size is also fixed.  */
  unsigned int byte_size;
  if (empty_ctor_p)
    {
      unsigned HOST_WIDE_INT rhs_bytes
	= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
      if (rhs_bytes > total_bytes)
	return false;
      byte_size = rhs_bytes;
    }
  else
    {
      fixed_size_mode mode
	= as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
      byte_size
	= mode == BLKmode
	  ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
	  : GET_MODE_SIZE (mode);
    }
  /* Allocate an extra byte so that we have space to shift into.  */
  byte_size++;
  unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
  memset (tmpbuf, '\0', byte_size);
  /* The store detection code should only have allowed constants that are
     accepted by native_encode_expr or empty ctors.  */
  if (!empty_ctor_p
      && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
    gcc_unreachable ();

  /* The native_encode_expr machinery uses TYPE_MODE to determine how many
     bytes to write.  This means it can write more than
     ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
     write 8 bytes for a bitlen of 40).  Skip the bytes that are not within
     bitlen and zero out the bits that are not relevant as well (that may
     contain a sign bit due to sign-extension).  */
  unsigned int padding
    = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
  /* On big-endian the padding is at the 'front' so just skip the initial
     bytes.  */
  if (BYTES_BIG_ENDIAN)
    tmpbuf += padding;

  byte_size -= padding;

  if (bitlen % BITS_PER_UNIT != 0)
    {
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
			     BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
      else
	clear_bit_region (tmpbuf, bitlen,
			  byte_size * BITS_PER_UNIT - bitlen);
    }
  /* Left shifting relies on the last byte being clear if bitlen is
     a multiple of BITS_PER_UNIT, which might not be clear if
     there are padding bytes.  */
  else if (!BYTES_BIG_ENDIAN)
    tmpbuf[byte_size - 1] = '\0';

  /* Clear the bit region in PTR where the bits from TMPBUF will be
     inserted into.  */
  if (BYTES_BIG_ENDIAN)
    clear_bit_region_be (ptr + first_byte,
			 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
  else
    clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);

  int shift_amnt;
  int bitlen_mod = bitlen % BITS_PER_UNIT;
  int bitpos_mod = bitpos % BITS_PER_UNIT;

  bool skip_byte = false;
  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS and BITLEN are exactly aligned and no shifting
	 is necessary.  */
      if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
	  || (bitpos_mod == 0 && bitlen_mod == 0))
	shift_amnt = 0;
      /* |. . . . . . . .|
	  <bp >   <blen >.
	 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
	 of the value until it aligns with 'bp' in the next byte over.  */
      else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
	{
	  shift_amnt = bitlen_mod + bitpos_mod;
	  skip_byte = bitlen_mod != 0;
	}
      /* |. . . . . . . .|
	  <----bp--->
	    <blen >.
	 Shift the value right within the same byte so it aligns with 'bp'.  */
      else
	shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
    }
  else
    shift_amnt = bitpos % BITS_PER_UNIT;

  /* Create the shifted version of EXPR.  */
  if (!BYTES_BIG_ENDIAN)
    {
      shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
      if (shift_amnt == 0)
	byte_size--;
    }
  else
    {
      gcc_assert (BYTES_BIG_ENDIAN);
      shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
      /* If shifting right forced us to move into the next byte skip the now
	 empty byte.  */
      if (skip_byte)
	{
	  tmpbuf++;
	  byte_size--;
	}
    }

  /* Insert the bits from TMPBUF.  */
  for (unsigned int i = 0; i < byte_size; i++)
    ptr[first_byte + i] |= tmpbuf[i];

  return true;
}
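
/* For illustration (little-endian): encoding a 6-bit value at bitpos 3
   shifts the native encoding left by 3, clears bits 3..8 of the
   destination (the top five bits of byte 0 and the low bit of byte 1)
   and ORs the shifted bytes in, leaving bits 0..2 of byte 0 intact.  */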

/* Sorting function for store_immediate_info objects.
   Sorts them by bitposition.  */

static int
sort_by_bitpos (const void *x, const void *y)
{
  store_immediate_info *const *tmp = (store_immediate_info * const *) x;
  store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;

  if ((*tmp)->bitpos < (*tmp2)->bitpos)
    return -1;
  else if ((*tmp)->bitpos > (*tmp2)->bitpos)
    return 1;
  else
    /* If they are the same let's use the order which is guaranteed to
       be different.  */
    return (*tmp)->order - (*tmp2)->order;
}

/* Sorting function for store_immediate_info objects.
   Sorts them by the order field.  */

static int
sort_by_order (const void *x, const void *y)
{
  store_immediate_info *const *tmp = (store_immediate_info * const *) x;
  store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;

  if ((*tmp)->order < (*tmp2)->order)
    return -1;
  else if ((*tmp)->order > (*tmp2)->order)
    return 1;

  gcc_unreachable ();
}

/* Initialize a merged_store_group object from a store_immediate_info
   object.  */

merged_store_group::merged_store_group (store_immediate_info *info)
{
  start = info->bitpos;
  width = info->bitsize;
  bitregion_start = info->bitregion_start;
  bitregion_end = info->bitregion_end;
  /* VAL has memory allocated for it in apply_stores once the group
     width has been finalized.  */
  val = NULL;
  mask = NULL;
  bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
  string_concatenation = info->rhs_code == STRING_CST;
  only_constants = info->rhs_code == INTEGER_CST;
  consecutive = true;
  first_nonmergeable_order = ~0U;
  lp_nr = info->lp_nr;
  unsigned HOST_WIDE_INT align_bitpos = 0;
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
			  &align, &align_bitpos);
  align_base = start - align_bitpos;
  for (int i = 0; i < 2; ++i)
    {
      store_operand_info &op = info->ops[i];
      if (op.base_addr == NULL_TREE)
	{
	  load_align[i] = 0;
	  load_align_base[i] = 0;
	}
      else
	{
	  get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
	  load_align_base[i] = op.bitpos - align_bitpos;
	}
    }
  stores.create (1);
  stores.safe_push (info);
  last_stmt = info->stmt;
  last_order = info->order;
  first_stmt = last_stmt;
  first_order = last_order;
  buf_size = 0;
}

merged_store_group::~merged_store_group ()
{
  if (val)
    XDELETEVEC (val);
}
/* Return true if the store described by INFO can be merged into the group.  */

bool
merged_store_group::can_be_merged_into (store_immediate_info *info)
{
  /* Do not merge bswap patterns.  */
  if (info->rhs_code == LROTATE_EXPR)
    return false;

  if (info->lp_nr != lp_nr)
    return false;

  /* The canonical case.  */
  if (info->rhs_code == stores[0]->rhs_code)
    return true;

  /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST.  */
  if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
    return !string_concatenation;

  if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
    return !string_concatenation;

  /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
     only for small regions since this can generate a lot of instructions.  */
  if (info->rhs_code == MEM_REF
      && (stores[0]->rhs_code == INTEGER_CST
	  || stores[0]->rhs_code == BIT_INSERT_EXPR)
      && info->bitregion_start == stores[0]->bitregion_start
      && info->bitregion_end == stores[0]->bitregion_end
      && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
    return !string_concatenation;

  if (stores[0]->rhs_code == MEM_REF
      && (info->rhs_code == INTEGER_CST
	  || info->rhs_code == BIT_INSERT_EXPR)
      && info->bitregion_start == stores[0]->bitregion_start
      && info->bitregion_end == stores[0]->bitregion_end
      && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
    return !string_concatenation;

  /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR.  */
  if (info->rhs_code == STRING_CST
      && stores[0]->rhs_code == INTEGER_CST
      && stores[0]->bitsize == CHAR_BIT)
    return !bit_insertion;

  if (stores[0]->rhs_code == STRING_CST
      && info->rhs_code == INTEGER_CST
      && info->bitsize == CHAR_BIT)
    return !bit_insertion;

  return false;
}
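/* In other words, the compatibility rules above are: identical rhs_code
   always merges; INTEGER_CST and BIT_INSERT_EXPR mix unless the group
   already does string concatenation; small same-bitregion MEM_REF copies
   may later be turned into bit insertions; and single-byte INTEGER_CST
   stores mix with STRING_CST unless bit insertion is already required.  */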
/* Helper method for merge_into and merge_overlapping to do
   the common part.  */

void
merged_store_group::do_merge (store_immediate_info *info)
{
  bitregion_start = MIN (bitregion_start, info->bitregion_start);
  bitregion_end = MAX (bitregion_end, info->bitregion_end);

  unsigned int this_align;
  unsigned HOST_WIDE_INT align_bitpos = 0;
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
			  &this_align, &align_bitpos);
  if (this_align > align)
    {
      align = this_align;
      align_base = info->bitpos - align_bitpos;
    }
  for (int i = 0; i < 2; ++i)
    {
      store_operand_info &op = info->ops[i];
      if (!op.base_addr)
	continue;

      get_object_alignment_1 (op.val, &this_align, &align_bitpos);
      if (this_align > load_align[i])
	{
	  load_align[i] = this_align;
	  load_align_base[i] = op.bitpos - align_bitpos;
	}
    }

  gimple *stmt = info->stmt;
  stores.safe_push (info);
  if (info->order > last_order)
    {
      last_order = info->order;
      last_stmt = stmt;
    }
  else if (info->order < first_order)
    {
      first_order = info->order;
      first_stmt = stmt;
    }

  if (info->bitpos != start + width)
    consecutive = false;

  /* We need to use extraction if there is any bit-field.  */
  if (info->rhs_code == BIT_INSERT_EXPR)
    {
      bit_insertion = true;
      gcc_assert (!string_concatenation);
    }

  /* We want to use concatenation if there is any string.  */
  if (info->rhs_code == STRING_CST)
    {
      string_concatenation = true;
      gcc_assert (!bit_insertion);
    }

  /* But we cannot use it if we don't have consecutive stores.  */
  if (!consecutive)
    string_concatenation = false;

  if (info->rhs_code != INTEGER_CST)
    only_constants = false;
}
/* Merge a store recorded by INFO into this merged store.
   The store is not overlapping with the existing recorded
   stores.  */

void
merged_store_group::merge_into (store_immediate_info *info)
{
  do_merge (info);

  /* Make sure we're inserting in the position we think we're inserting.  */
  gcc_assert (info->bitpos >= start + width
	      && info->bitregion_start <= bitregion_end);

  width = info->bitpos + info->bitsize - start;
}
/* Merge a store described by INFO into this merged store.
   INFO overlaps in some way with the current store (i.e. it's not contiguous
   which is handled by merged_store_group::merge_into).  */

void
merged_store_group::merge_overlapping (store_immediate_info *info)
{
  do_merge (info);

  /* If the store extends the size of the group, extend the width.  */
  if (info->bitpos + info->bitsize > start + width)
    width = info->bitpos + info->bitsize - start;
}
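/* For instance, merging a store of bitpos 24 and bitsize 16 into a group
   with start 0 and width 32 leaves start untouched and grows width to 40,
   since the new store ends past the current end of the group.  */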
/* Go through all the recorded stores in this group in program order and
   apply their values to the VAL byte array to create the final merged
   value.  Return true if the operation succeeded.  */

bool
merged_store_group::apply_stores ()
{
  store_immediate_info *info;
  unsigned int i;

  /* Make sure we have more than one store in the group, otherwise we cannot
     merge anything.  */
  if (bitregion_start % BITS_PER_UNIT != 0
      || bitregion_end % BITS_PER_UNIT != 0
      || stores.length () == 1)
    return false;

  buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;

  /* Really do string concatenation for large strings only.  */
  if (buf_size <= MOVE_MAX)
    string_concatenation = false;

  /* Create a power-of-2-sized buffer for native_encode_expr.  */
  if (!string_concatenation)
    buf_size = 1 << ceil_log2 (buf_size);

  val = XNEWVEC (unsigned char, 2 * buf_size);
  mask = val + buf_size;
  memset (val, 0, buf_size);
  memset (mask, ~0U, buf_size);

  stores.qsort (sort_by_order);

  FOR_EACH_VEC_ELT (stores, i, info)
    {
      unsigned int pos_in_buffer = info->bitpos - bitregion_start;
      tree cst;
      if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
	cst = info->ops[0].val;
      else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
	cst = info->ops[1].val;
      else
	cst = NULL_TREE;
      bool ret = true;
      if (cst && info->rhs_code != BIT_INSERT_EXPR)
	ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
				     buf_size);
      unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (m, (BITS_PER_UNIT - 1
				 - (pos_in_buffer % BITS_PER_UNIT)),
			     info->bitsize);
      else
	clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
      if (cst && dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (ret)
	    {
	      fputs ("After writing ", dump_file);
	      print_generic_expr (dump_file, cst, TDF_NONE);
	      fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
		       " at position %d\n", info->bitsize, pos_in_buffer);
	      fputs ("  the merged value contains ", dump_file);
	      dump_char_array (dump_file, val, buf_size);
	      fputs ("  the merged mask contains  ", dump_file);
	      dump_char_array (dump_file, mask, buf_size);
	      if (bit_insertion)
		fputs ("  bit insertion is required\n", dump_file);
	      if (string_concatenation)
		fputs ("  string concatenation is required\n", dump_file);
	    }
	  else
	    fprintf (dump_file, "Failed to merge stores\n");
	}
      if (!ret)
	return false;
    }
  stores.qsort (sort_by_bitpos);
  return true;
}
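/* Worked example: for two byte stores 0x12 at bit 0 and 0x34 at bit 8 in a
   4-byte bitregion, apply_stores ends with val = { 0x12, 0x34, 0, 0 } and
   mask = { 0, 0, 0xff, 0xff }: mask bytes start as all-ones and are
   cleared exactly where some recorded store wrote, so all-ones mask bytes
   mark padding that split_group may skip.  */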
/* Structure describing the store chain.  */

class imm_store_chain_info
{
public:
  /* Doubly-linked list that imposes an order on chain processing.
     PNXP (prev's next pointer) points to the head of a list, or to
     the next field in the previous chain in the list.
     See pass_store_merging::m_stores_head for more rationale.  */
  imm_store_chain_info *next, **pnxp;
  tree base_addr;
  auto_vec<store_immediate_info *> m_store_info;
  auto_vec<merged_store_group *> m_merged_store_groups;

  imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
  : next (inspt), pnxp (&inspt), base_addr (b_a)
  {
    inspt = this;
    if (next)
      {
	gcc_checking_assert (pnxp == next->pnxp);
	next->pnxp = &next;
      }
  }
  ~imm_store_chain_info ()
  {
    *pnxp = next;
    if (next)
      {
	gcc_checking_assert (&next == next->pnxp);
	next->pnxp = pnxp;
      }
  }
  bool terminate_and_process_chain ();
  bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
			   unsigned int);
  bool coalesce_immediate_stores ();
  bool output_merged_store (merged_store_group *);
  bool output_merged_stores ();
};
const pass_data pass_data_tree_store_merging = {
  GIMPLE_PASS,     /* type */
  "store-merging", /* name */
  OPTGROUP_NONE,   /* optinfo_flags */
  TV_GIMPLE_STORE_MERGING, /* tv_id */
  PROP_ssa,	/* properties_required */
  0,		/* properties_provided */
  0,		/* properties_destroyed */
  0,		/* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
class pass_store_merging : public gimple_opt_pass
{
public:
  pass_store_merging (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
  {
  }

  /* Pass not supported for PDP-endian, nor for insane hosts or
     target character sizes where native_{encode,interpret}_expr
     doesn't work properly.  */
  virtual bool
  gate (function *)
  {
    return flag_store_merging
	   && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
	   && CHAR_BIT == 8
	   && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

private:
  hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;

  /* Form a doubly-linked stack of the elements of m_stores, so that
     we can iterate over them in a predictable way.  Using this order
     avoids extraneous differences in the compiler output just because
     of tree pointer variations (e.g. different chains end up in
     different positions of m_stores, so they are handled in different
     orders, so they allocate or release SSA names in different
     orders, and when they get reused, subsequent passes end up
     getting different SSA names, which may ultimately change
     decisions when going out of SSA).  */
  imm_store_chain_info *m_stores_head;

  bool process_store (gimple *);
  bool terminate_and_process_chain (imm_store_chain_info *);
  bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
  bool terminate_and_process_all_chains ();
}; // class pass_store_merging
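/* Illustration of the m_stores_head discipline: after chains A and B are
   created in that order, the links are
     m_stores_head -> B -> A -> NULL
   with B.pnxp == &m_stores_head and A.pnxp == &B.next, so any chain can
   unlink itself in constant time from ~imm_store_chain_info while
   iteration over the list stays independent of hash table order.  */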
/* Terminate and process all recorded chains.  Return true if any changes
   were made.  */

bool
pass_store_merging::terminate_and_process_all_chains ()
{
  bool ret = false;
  while (m_stores_head)
    ret |= terminate_and_process_chain (m_stores_head);
  gcc_assert (m_stores.is_empty ());
  return ret;
}
/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  Return true if any changes were made.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
						     **chain_info,
						   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  ao_ref store_lhs_ref;
  ao_ref_init (&store_lhs_ref, store_lhs);
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
	 chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
	continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  ao_ref lhs_ref;
	  ao_ref_init (&lhs_ref, lhs);
	  if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
	      || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
	      || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
						   &lhs_ref, false)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "stmt causes chain termination:\n");
		  print_gimple_stmt (dump_file, stmt, 0);
		}
	      ret |= terminate_and_process_chain (cur);
	      break;
	    }
	}
    }

  return ret;
}
/* Helper function.  Terminate the recorded chain storing to base object
   BASE.  Return true if the merging and output was successful.  The m_stores
   entry is removed after the processing in any case.  */

bool
pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}
/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must have non-NULL vdef.  We want to
   be able to sink load of REF across stores between FIRST and LAST, up
   to right before LAST.  */

static bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  /* Return true conservatively if the basic blocks are different.  */
  if (gimple_bb (first) != gimple_bb (last))
    return true;

  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
	return true;
      if (gimple_store_p (stmt)
	  && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
	return true;
      /* Avoid quadratic compile time by bounding the number of checks
	 we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
	return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);

  return false;
}
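/* The walk above goes backwards through the virtual SSA chain: starting
   from the vdef of LAST it repeatedly maps a virtual operand to its
   defining statement, so for FIRST ... LAST within one basic block every
   store in between is tested against REF, with MAX_STORE_ALIAS_CHECKS
   bounding the walk to keep compile time linear.  */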
/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

static bool
compatible_load_p (merged_store_group *merged_store,
		   store_immediate_info *info,
		   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
		   info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
			   infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
	  && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (known_eq (info->ops[idx].bitpos, info->bitpos)
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the earlier processed loads clobber with the stmts in the new
     range.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
	  return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
	  return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}
/* Add all refs loaded to compute VAL to REFS vector.  */

static void
gather_bswap_load_refs (vec<tree> *refs, tree val)
{
  if (TREE_CODE (val) != SSA_NAME)
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (val);
  if (!is_gimple_assign (stmt))
    return;

  if (gimple_assign_load_p (stmt))
    {
      refs->safe_push (gimple_assign_rhs1 (stmt));
      return;
    }

  switch (gimple_assign_rhs_class (stmt))
    {
    case GIMPLE_BINARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
      /* FALLTHRU */
    case GIMPLE_UNARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
      break;
    default:
      gcc_unreachable ();
    }
}
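/* For example, for
     _1 = t.a; _2 = t.b; _3 = _1 | _2; p[0] = _3;
   calling gather_bswap_load_refs on _3 recurses through the binary rhs
   and pushes the two loaded references t.a and t.b into REFS, which
   try_coalesce_bswap then checks for clobbers between the stores.  */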
/* Check if there are any stores in M_STORE_INFO after index I
   (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
   a potential group ending with END that have their order
   smaller than LAST_ORDER.  ALL_INTEGER_CST_P is true if
   all the stores already merged and the one under consideration
   have rhs_code of INTEGER_CST.  Return true if there are no such stores.
   Consider:
     MEM[(long long int *)p_28] = 0;
     MEM[(long long int *)p_28 + 8B] = 0;
     MEM[(long long int *)p_28 + 16B] = 0;
     MEM[(long long int *)p_28 + 24B] = 0;
     _129 = (int) _130;
     MEM[(int *)p_28 + 8B] = _129;
     MEM[(int *)p_28].a = -1;
   We already have
     MEM[(long long int *)p_28] = 0;
     MEM[(int *)p_28].a = -1;
   stmts in the current group and need to consider if it is safe to
   add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
   There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
   store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
   into the group and merging of those 3 stores is successful, merged
   stmts will be emitted at the latest store from that group, i.e.
   LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
   The MEM[(int *)p_28 + 8B] = _129; store that originally follows
   the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
   so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
   into the group.  That way it will be its own store group and will
   not be touched.  If ALL_INTEGER_CST_P and there are overlapping
   INTEGER_CST stores, those are mergeable using merge_overlapping,
   so don't return false for those.

   Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
   (exclusive), whether they don't overlap the bitrange START to END
   and have order in between FIRST_ORDER and LAST_ORDER.  This is to
   prevent merging in cases like:
     MEM <char[12]> [&b + 8B] = {};
     MEM[(short *) &b] = 5;
     _5 = *x_4(D);
     MEM <long long unsigned int> [&b + 2B] = _5;
     MEM[(char *)&b + 16B] = 88;
     MEM[(int *)&b + 20B] = 1;
   The = {} store comes in sort_by_bitpos before the = 88 store, and can't
   be merged with it, because the = _5 store overlaps these and is in between
   them in sort_by_order ordering.  If it was merged, the merged store would
   go after the = _5 store and thus change behavior.  */

static bool
check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
		  bool all_integer_cst_p, unsigned int first_order,
		  unsigned int last_order, unsigned HOST_WIDE_INT start,
		  unsigned HOST_WIDE_INT end, unsigned int first_earlier,
		  unsigned end_earlier)
{
  unsigned int len = m_store_info.length ();
  for (unsigned int j = first_earlier; j < end_earlier; j++)
    {
      store_immediate_info *info = m_store_info[j];
      if (info->order > first_order
	  && info->order < last_order
	  && info->bitpos + info->bitsize > start)
	return false;
    }
  for (++i; i < len; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      if (info->bitpos >= end)
	break;
      if (info->order < last_order
	  && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
	return false;
    }
  return true;
}
/* Return true if m_store_info[first] and at least one following store
   form a group which store try_size bitsize value which is byte swapped
   from a memory load or some value, or identity from some value.
   This uses the bswap pass APIs.  */

bool
imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
					  unsigned int first,
					  unsigned int try_size,
					  unsigned int first_earlier)
{
  unsigned int len = m_store_info.length (), last = first;
  unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
  if (width >= try_size)
    return false;
  for (unsigned int i = first + 1; i < len; ++i)
    {
      if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
	  || m_store_info[i]->lp_nr != merged_store->lp_nr
	  || m_store_info[i]->ins_stmt == NULL)
	return false;
      width += m_store_info[i]->bitsize;
      if (width >= try_size)
	{
	  last = i;
	  break;
	}
    }
  if (width != try_size)
    return false;

  bool allow_unaligned
    = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
  /* Punt if the combined store would not be aligned and we need alignment.  */
  if (!allow_unaligned)
    {
      unsigned int align = merged_store->align;
      unsigned HOST_WIDE_INT align_base = merged_store->align_base;
      for (unsigned int i = first + 1; i <= last; ++i)
	{
	  unsigned int this_align;
	  unsigned HOST_WIDE_INT align_bitpos = 0;
	  get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
				  &this_align, &align_bitpos);
	  if (this_align > align)
	    {
	      align = this_align;
	      align_base = m_store_info[i]->bitpos - align_bitpos;
	    }
	}
      unsigned HOST_WIDE_INT align_bitpos
	= (m_store_info[first]->bitpos - align_base) & (align - 1);
      if (align_bitpos)
	align = least_bit_hwi (align_bitpos);
      if (align < try_size)
	return false;
    }

  tree type;
  switch (try_size)
    {
    case 16: type = uint16_type_node; break;
    case 32: type = uint32_type_node; break;
    case 64: type = uint64_type_node; break;
    default: gcc_unreachable ();
    }
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  int vuse_store = -1;
  unsigned int first_order = merged_store->first_order;
  unsigned int last_order = merged_store->last_order;
  gimple *first_stmt = merged_store->first_stmt;
  gimple *last_stmt = merged_store->last_stmt;
  unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
  store_immediate_info *infof = m_store_info[first];

  for (unsigned int i = first; i <= last; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      struct symbolic_number this_n = info->n;
      this_n.type = type;
      if (!this_n.base_addr)
	this_n.range = try_size / BITS_PER_UNIT;
      else
	/* Update vuse in case it has changed by output_merged_stores.  */
	this_n.vuse = gimple_vuse (info->ins_stmt);
      unsigned int bitpos = info->bitpos - infof->bitpos;
      if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
			    BYTES_BIG_ENDIAN
			    ? try_size - info->bitsize - bitpos
			    : bitpos))
	return false;
      if (this_n.base_addr && vuse_store)
	{
	  unsigned int j;
	  for (j = first; j <= last; ++j)
	    if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
	      break;
	  if (j > last)
	    {
	      if (vuse_store == 1)
		return false;
	      vuse_store = 0;
	    }
	}
      if (i == first)
	{
	  n = this_n;
	  ins_stmt = info->ins_stmt;
	}
      else
	{
	  if (n.base_addr && n.vuse != this_n.vuse)
	    {
	      if (vuse_store == 0)
		return false;
	      vuse_store = 1;
	    }
	  if (info->order > last_order)
	    {
	      last_order = info->order;
	      last_stmt = info->stmt;
	    }
	  else if (info->order < first_order)
	    {
	      first_order = info->order;
	      first_stmt = info->stmt;
	    }
	  end = MAX (end, info->bitpos + info->bitsize);

	  ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
					     &this_n, &n);
	  if (ins_stmt == NULL)
	    return false;
	}
    }

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number to start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n.n != cmpnop && n.n != cmpxchg)
    return false;

  if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
    return false;

  if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
			 merged_store->start, end, first_earlier, first))
    return false;

  /* Don't handle memory copy this way if normal non-bswap processing
     would handle it too.  */
  if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
    {
      unsigned int i;
      for (i = first; i <= last; ++i)
	if (m_store_info[i]->rhs_code != MEM_REF)
	  break;
      if (i == last + 1)
	return false;
    }

  if (n.n == cmpxchg)
    switch (try_size)
      {
      case 16:
	/* Will emit LROTATE_EXPR.  */
	break;
      case 32:
	if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	    && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
	  break;
	return false;
      case 64:
	if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	    && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
	  break;
	return false;
      default:
	gcc_unreachable ();
      }

  if (!allow_unaligned && n.base_addr)
    {
      unsigned int align = get_object_alignment (n.src);
      if (align < try_size)
	return false;
    }

  /* If each load has vuse of the corresponding store, need to verify
     the loads can be sunk right before the last store.  */
  if (vuse_store == 1)
    {
      auto_vec<tree, 64> refs;
      for (unsigned int i = first; i <= last; ++i)
	gather_bswap_load_refs (&refs,
				gimple_assign_rhs1 (m_store_info[i]->stmt));

      unsigned int i;
      tree ref;
      FOR_EACH_VEC_ELT (refs, i, ref)
	if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
	  return false;
      n.vuse = NULL_TREE;
    }

  infof->n = n;
  infof->ins_stmt = ins_stmt;
  for (unsigned int i = first; i <= last; ++i)
    {
      m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
      m_store_info[i]->ops[0].base_addr = NULL_TREE;
      m_store_info[i]->ops[1].base_addr = NULL_TREE;
      if (i != first)
	merged_store->merge_into (m_store_info[i]);
    }

  return true;
}
/* Go through the candidate stores recorded in m_store_info and merge them
   into merged_store_group objects recorded into m_merged_store_groups
   representing the widened stores.  Return true if coalescing was successful
   and the number of widened stores is fewer than the original number
   of stores.  */

bool
imm_store_chain_info::coalesce_immediate_stores ()
{
  /* Anything less can't be processed.  */
  if (m_store_info.length () < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
	     m_store_info.length ());

  store_immediate_info *info;
  unsigned int i, ignore = 0;
  unsigned int first_earlier = 0;
  unsigned int end_earlier = 0;

  /* Order the stores by the bitposition they write to.  */
  m_store_info.qsort (sort_by_bitpos);

  info = m_store_info[0];
  merged_store_group *merged_store = new merged_store_group (info);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fputs ("New store group\n", dump_file);

  FOR_EACH_VEC_ELT (m_store_info, i, info)
    {
      unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;

      if (i <= ignore)
	goto done;

      while (first_earlier < end_earlier
	     && (m_store_info[first_earlier]->bitpos
		 + m_store_info[first_earlier]->bitsize
		 <= merged_store->start))
	first_earlier++;

      /* First try to handle group of stores like:
	 [p] = data >> 24;
	 [p + 1] = data >> 16;
	 [p + 2] = data >> 8;
	 [p + 3] = data;
	 using the bswap framework.  */
      if (info->bitpos == merged_store->start + merged_store->width
	  && merged_store->stores.length () == 1
	  && merged_store->stores[0]->ins_stmt != NULL
	  && info->lp_nr == merged_store->lp_nr
	  && info->ins_stmt != NULL)
	{
	  unsigned int try_size;
	  for (try_size = 64; try_size >= 16; try_size >>= 1)
	    if (try_coalesce_bswap (merged_store, i - 1, try_size,
				    first_earlier))
	      break;

	  if (try_size >= 16)
	    {
	      ignore = i + merged_store->stores.length () - 1;
	      m_merged_store_groups.safe_push (merged_store);
	      if (ignore < m_store_info.length ())
		{
		  merged_store = new merged_store_group (m_store_info[ignore]);
		  end_earlier = ignore;
		}
	      else
		merged_store = NULL;
	      goto done;
	    }
	}

      new_bitregion_start
	= MIN (merged_store->bitregion_start, info->bitregion_start);
      new_bitregion_end
	= MAX (merged_store->bitregion_end, info->bitregion_end);

      if (info->order >= merged_store->first_nonmergeable_order
	  || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
	      > (unsigned) param_store_merging_max_size))
	;

      /* |---store 1---|
	       |---store 2---|
	 Overlapping stores.  */
      else if (IN_RANGE (info->bitpos, merged_store->start,
			 merged_store->start + merged_store->width - 1)
	       /* |---store 1---||---store 2---|
		  Handle also the consecutive INTEGER_CST stores case here,
		  as we have here the code to deal with overlaps.  */
	       || (info->bitregion_start <= merged_store->bitregion_end
		   && info->rhs_code == INTEGER_CST
		   && merged_store->only_constants
		   && merged_store->can_be_merged_into (info)))
	{
	  /* Only allow overlapping stores of constants.  */
	  if (info->rhs_code == INTEGER_CST
	      && merged_store->only_constants
	      && info->lp_nr == merged_store->lp_nr)
	    {
	      unsigned int first_order
		= MIN (merged_store->first_order, info->order);
	      unsigned int last_order
		= MAX (merged_store->last_order, info->order);
	      unsigned HOST_WIDE_INT end
		= MAX (merged_store->start + merged_store->width,
		       info->bitpos + info->bitsize);
	      if (check_no_overlap (m_store_info, i, true, first_order,
				    last_order, merged_store->start, end,
				    first_earlier, end_earlier))
		{
		  /* check_no_overlap call above made sure there are no
		     overlapping stores with non-INTEGER_CST rhs_code
		     in between the first and last of the stores we've
		     just merged.  If there are any INTEGER_CST rhs_code
		     stores in between, we need to merge_overlapping them
		     even if in the sort_by_bitpos order there are other
		     overlapping stores in between.  Keep those stores as is.
		     Example:
			MEM[(int *)p_28] = 0;
			MEM[(char *)p_28 + 3B] = 1;
			MEM[(char *)p_28 + 1B] = 2;
			MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
		     We can't merge the zero store with the store of two and
		     not merge anything else, because the store of one is
		     in the original order in between those two, but in
		     store_by_bitpos order it comes after the last store that
		     we can't merge with them.  We can merge the first 3 stores
		     and keep the last store as is though.  */
		  unsigned int len = m_store_info.length ();
		  unsigned int try_order = last_order;
		  unsigned int first_nonmergeable_order;
		  unsigned int k;
		  bool last_iter = false;
		  int attempts = 0;
		  do
		    {
		      unsigned int max_order = 0;
		      unsigned int min_order = first_order;
		      unsigned first_nonmergeable_int_order = ~0U;
		      unsigned HOST_WIDE_INT this_end = end;
		      k = i;
		      first_nonmergeable_order = ~0U;
		      for (unsigned int j = i + 1; j < len; ++j)
			{
			  store_immediate_info *info2 = m_store_info[j];
			  if (info2->bitpos >= this_end)
			    break;
			  if (info2->order < try_order)
			    {
			      if (info2->rhs_code != INTEGER_CST
				  || info2->lp_nr != merged_store->lp_nr)
				{
				  /* Normally check_no_overlap makes sure this
				     doesn't happen, but if end grows below,
				     then we need to process more stores than
				     check_no_overlap verified.  Example:
				      MEM[(int *)p_5] = 0;
				      MEM[(short *)p_5 + 3B] = 1;
				      MEM[(char *)p_5 + 4B] = _9;
				      MEM[(char *)p_5 + 2B] = 2;  */
				  k = 0;
				  break;
				}
			      k = j;
			      min_order = MIN (min_order, info2->order);
			      this_end = MAX (this_end,
					      info2->bitpos + info2->bitsize);
			    }
			  else if (info2->rhs_code == INTEGER_CST
				   && info2->lp_nr == merged_store->lp_nr
				   && !last_iter)
			    {
			      max_order = MAX (max_order, info2->order + 1);
			      first_nonmergeable_int_order
				= MIN (first_nonmergeable_int_order,
				       info2->order);
			    }
			  else
			    first_nonmergeable_order
			      = MIN (first_nonmergeable_order, info2->order);
			}
		      if (k > i
			  && !check_no_overlap (m_store_info, len - 1, true,
						min_order, try_order,
						merged_store->start, this_end,
						first_earlier, end_earlier))
			k = 0;
		      if (k == 0)
			{
			  if (last_order == try_order)
			    break;
			  /* If this failed, but only because we grew
			     try_order, retry with the last working one,
			     so that we merge at least something.  */
			  try_order = last_order;
			  last_iter = true;
			  continue;
			}
		      last_order = try_order;
		      /* Retry with a larger try_order to see if we could
			 merge some further INTEGER_CST stores.  */
		      if (max_order
			  && (first_nonmergeable_int_order
			      < first_nonmergeable_order))
			{
			  try_order = MIN (max_order,
					   first_nonmergeable_order);
			  try_order
			    = MIN (try_order,
				   merged_store->first_nonmergeable_order);
			  if (try_order > last_order && ++attempts < 16)
			    continue;
			}
		      first_nonmergeable_order
			= MIN (first_nonmergeable_order,
			       first_nonmergeable_int_order);
		      end = this_end;
		      break;
		    }
		  while (1);

		  if (k != 0)
		    {
		      merged_store->merge_overlapping (info);

		      merged_store->first_nonmergeable_order
			= MIN (merged_store->first_nonmergeable_order,
			       first_nonmergeable_order);

		      for (unsigned int j = i + 1; j <= k; j++)
			{
			  store_immediate_info *info2 = m_store_info[j];
			  gcc_assert (info2->bitpos < end);
			  if (info2->order < last_order)
			    {
			      gcc_assert (info2->rhs_code == INTEGER_CST);
			      merged_store->merge_overlapping (info2);
			    }
			  /* Other stores are kept and not merged in any
			     way.  */
			}
		      ignore = k;
		      goto done;
		    }
		}
	    }
	}
      /* |---store 1---||---store 2---|
	 This store is consecutive to the previous one.
	 Merge it into the current store group.  There can be gaps in between
	 the stores, but there can't be gaps in between bitregions.  */
      else if (info->bitregion_start <= merged_store->bitregion_end
	       && merged_store->can_be_merged_into (info))
	{
	  store_immediate_info *infof = merged_store->stores[0];

	  /* All the rhs_code ops that take 2 operands are commutative,
	     swap the operands if it could make the operands compatible.  */
	  if (infof->ops[0].base_addr
	      && infof->ops[1].base_addr
	      && info->ops[0].base_addr
	      && info->ops[1].base_addr
	      && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
			   info->bitpos - infof->bitpos)
	      && operand_equal_p (info->ops[1].base_addr,
				  infof->ops[0].base_addr, 0))
	    {
	      std::swap (info->ops[0], info->ops[1]);
	      info->ops_swapped_p = true;
	    }
	  if (check_no_overlap (m_store_info, i, false,
				MIN (merged_store->first_order, info->order),
				MAX (merged_store->last_order, info->order),
				merged_store->start,
				MAX (merged_store->start + merged_store->width,
				     info->bitpos + info->bitsize),
				first_earlier, end_earlier))
	    {
	      /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores.  */
	      if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
		{
		  info->rhs_code = BIT_INSERT_EXPR;
		  info->ops[0].val = gimple_assign_rhs1 (info->stmt);
		  info->ops[0].base_addr = NULL_TREE;
		}
	      else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
		{
		  store_immediate_info *infoj;
		  unsigned int j;
		  FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
		    {
		      infoj->rhs_code = BIT_INSERT_EXPR;
		      infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
		      infoj->ops[0].base_addr = NULL_TREE;
		    }
		  merged_store->bit_insertion = true;
		}
	      if ((infof->ops[0].base_addr
		   ? compatible_load_p (merged_store, info, base_addr, 0)
		   : !info->ops[0].base_addr)
		  && (infof->ops[1].base_addr
		      ? compatible_load_p (merged_store, info, base_addr, 1)
		      : !info->ops[1].base_addr))
		{
		  merged_store->merge_into (info);
		  goto done;
		}
	    }
	}

      /* |---store 1---| <gap> |---store 2---|.
	 Gap between stores or the rhs not compatible.  Start a new group.  */

      /* Try to apply all the stores recorded for the group to determine
	 the bitpattern they write and discard it if that fails.
	 This will also reject single-store groups.  */
      if (merged_store->apply_stores ())
	m_merged_store_groups.safe_push (merged_store);
      else
	delete merged_store;

      merged_store = new merged_store_group (info);
      end_earlier = i;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fputs ("New store group\n", dump_file);

    done:
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
			      " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
		   i, info->bitsize, info->bitpos);
	  print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
	  fputc ('\n', dump_file);
	}
    }

  /* Record or discard the last store group.  */
  if (merged_store)
    {
      if (merged_store->apply_stores ())
	m_merged_store_groups.safe_push (merged_store);
      else
	delete merged_store;
    }

  gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());

  bool success
    = !m_merged_store_groups.is_empty ()
      && m_merged_store_groups.length () < m_store_info.length ();

  if (success && dump_file)
    fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
	     m_merged_store_groups.length ());

  return success;
}
/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
			  unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
			 : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
	{
	  if (TREE_CODE (base) == MEM_REF)
	    {
	      *cliquep = MR_DEPENDENCE_CLIQUE (base);
	      *basep = MR_DEPENDENCE_BASE (base);
	    }
	  ret = type = type1;
	  continue;
	}
      if (!alias_ptr_types_compatible_p (type, type1))
	ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
	  || *cliquep != MR_DEPENDENCE_CLIQUE (base)
	  || *basep != MR_DEPENDENCE_BASE (base))
	{
	  *cliquep = 0;
	  *basep = 0;
	}
    }
  return ret;
}
/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}
/* Used to describe a store resulting from splitting a wide store in smaller
   regularly-sized stores in split_group.  */

class split_store
{
public:
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	       unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
			  unsigned HOST_WIDE_INT sz,
			  unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}
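/* Usage sketch: splitting a 16-byte group that is 8-byte aligned would
   typically yield split_store (0, 64, 64) and split_store (8, 64, 64),
   i.e. byte positions 0 and 8 with 64-bit sizes and alignments; the
   orig_stores vectors are filled in afterwards by
   find_constituent_stores.  */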
/* Record all stores in GROUP that write to the region starting at BITPOS and
   is of size BITSIZE.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return INFO
   if there is exactly one original store in the range (in that case ignore
   clobber stmts, unless there are only clobber stmts).  */

static store_immediate_info *
find_constituent_stores (class merged_store_group *group,
			 vec<store_immediate_info *> *stores,
			 unsigned int *first,
			 unsigned HOST_WIDE_INT bitpos,
			 unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
	{
	  /* BITPOS passed to this function never decreases from within the
	     same split_group call, so optimize and don't scan info records
	     which are known to end before or at BITPOS next time.
	     Only do it if all stores before this one also pass this.  */
	  if (update_first)
	    *first = i + 1;
	  continue;
	}
      else
	update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
	 the region for this group return early.  */
      if (stmt_start >= end)
	return ret;

      if (gimple_clobber_p (info->stmt))
	{
	  if (stores)
	    stores->safe_push (info);
	  if (ret == NULL)
	    ret = info;
	  continue;
	}
      if (stores)
	{
	  stores->safe_push (info);
	  if (ret && !gimple_clobber_p (ret->stmt))
	    {
	      ret = NULL;
	      second = true;
	    }
	}
      else if (ret && !gimple_clobber_p (ret->stmt))
	return NULL;
      if (!second)
	ret = info;
    }
  return ret;
}
/* Return how many SSA_NAMEs used to compute value to store in the INFO
   store have multiple uses.  If any SSA_NAME has multiple uses, also
   count statements needed to compute it.  */

static unsigned
count_multiple_uses (store_immediate_info *info)
{
  gimple *stmt = info->stmt;
  unsigned ret = 0;
  switch (info->rhs_code)
    {
    case INTEGER_CST:
    case STRING_CST:
      return 0;
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (info->bit_not_p)
	{
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    ret = 1; /* Fall through below to return
			the BIT_NOT_EXPR stmt and then
			BIT_{AND,IOR,XOR}_EXPR and anything it
			uses.  */
	  else
	    /* stmt is after this the BIT_NOT_EXPR.  */
	    stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	}
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	{
	  ret += 1 + info->ops[0].bit_not_p;
	  if (info->ops[1].base_addr)
	    ret += 1 + info->ops[1].bit_not_p;
	  return ret + 1;
	}
      stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      /* stmt is now the BIT_*_EXPR.  */
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
      else if (info->ops[info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      if (info->ops[1].base_addr == NULL_TREE)
	{
	  gcc_checking_assert (!info->ops_swapped_p);
	  return ret;
	}
      if (!has_single_use (gimple_assign_rhs2 (stmt)))
	ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
      else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      return ret;
    case MEM_REF:
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	return 1 + info->ops[0].bit_not_p;
      else if (info->ops[0].bit_not_p)
	{
	  stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    return 1;
	}
      return 0;
    case BIT_INSERT_EXPR:
      return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
    default:
      gcc_unreachable ();
    }
}
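/* For example, if the value stored by
     _1 = q[0]; _2 = _1 ^ 7; p[0] = _2;
   is rewritten but _1 has another use elsewhere, the load of q[0] must
   stay; count_multiple_uses reports such statements so that the
   profitability heuristics in split_group do not count them as saved.  */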
/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   BZERO_FIRST may be true only when the first store covers the whole group
   and clears it; if BZERO_FIRST is true, keep that first store in the set
   unmodified and emit further stores for the overrides only.
   If SPLIT_STORES is NULL, it is just a dry run to count number of
   new stores.  */

static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
	     bool allow_unaligned_load, bool bzero_first,
	     vec<split_store *> *split_stores,
	     unsigned *total_orig,
	     unsigned *total_new)
{
  unsigned HOST_WIDE_INT pos = group->bitregion_start;
  unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
  unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
  unsigned HOST_WIDE_INT group_align = group->align;
  unsigned HOST_WIDE_INT align_base = group->align_base;
  unsigned HOST_WIDE_INT group_load_align = group_align;
  bool any_orig = false;

  gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));

  /* For bswap framework using sets of stores, all the checking has been done
     earlier in try_coalesce_bswap and the result always needs to be emitted
     as a single store.  Likewise for string concatenation.  */
  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR
      || group->string_concatenation)
    {
      gcc_assert (!bzero_first);
      if (total_orig)
	{
	  /* Avoid the old/new stmt count heuristics.  It should be
	     always beneficial.  */
	  total_new[0] = 1;
	  total_orig[0] = 2;
	}

      if (split_stores)
	{
	  unsigned HOST_WIDE_INT align_bitpos
	    = (group->start - align_base) & (group_align - 1);
	  unsigned HOST_WIDE_INT align = group_align;
	  if (align_bitpos)
	    align = least_bit_hwi (align_bitpos);
	  bytepos = group->start / BITS_PER_UNIT;
	  split_store *store
	    = new split_store (bytepos, group->width, align);
	  unsigned int first = 0;
	  find_constituent_stores (group, &store->orig_stores,
				   &first, group->start, group->width);
	  split_stores->safe_push (store);
	}

      return 1;
    }

  unsigned int ret = 0, first = 0;
  unsigned HOST_WIDE_INT try_pos = bytepos;

  if (total_orig)
    {
      unsigned int i;
      store_immediate_info *info = group->stores[0];

      total_new[0] = 0;
      total_orig[0] = 1; /* The orig store.  */
      info = group->stores[0];
      if (info->ops[0].base_addr)
	total_orig[0]++;
      if (info->ops[1].base_addr)
	total_orig[0]++;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_orig[0]++; /* The orig BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      total_orig[0] *= group->stores.length ();

      FOR_EACH_VEC_ELT (group->stores, i, info)
	{
	  total_new[0] += count_multiple_uses (info);
	  total_orig[0] += (info->bit_not_p
			    + info->ops[0].bit_not_p
			    + info->ops[1].bit_not_p);
	}
    }

  if (!allow_unaligned_load)
    for (int i = 0; i < 2; ++i)
      if (group->load_align[i])
	group_load_align = MIN (group_load_align, group->load_align[i]);

  if (bzero_first)
    {
      store_immediate_info *gstore;
      FOR_EACH_VEC_ELT (group->stores, first, gstore)
	if (!gimple_clobber_p (gstore->stmt))
	  break;
      ++first;
      ret = 1;
      if (split_stores)
	{
	  split_store *store
	    = new split_store (bytepos, gstore->bitsize, align_base);
	  store->orig_stores.safe_push (gstore);
	  store->orig = true;
	  any_orig = true;
	  split_stores->safe_push (store);
	}
    }

  while (size > 0)
    {
      if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
	  && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
	      || (bzero_first && group->val[try_pos - bytepos] == 0)))
	{
	  /* Skip padding bytes.  */
	  ++try_pos;
	  size -= BITS_PER_UNIT;
	  continue;
	}

      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
      unsigned HOST_WIDE_INT align_bitpos
	= (try_bitpos - align_base) & (group_align - 1);
      unsigned HOST_WIDE_INT align = group_align;
      bool found_orig = false;
      if (align_bitpos)
	align = least_bit_hwi (align_bitpos);
      if (!allow_unaligned_store)
	try_size = MIN (try_size, align);
      if (!allow_unaligned_load)
	{
	  /* If we can't do or don't want to do unaligned stores
	     as well as loads, we need to take the loads into account
	     as well.  */
	  unsigned HOST_WIDE_INT load_align = group_load_align;
	  align_bitpos = (try_bitpos - align_base) & (load_align - 1);
	  if (align_bitpos)
	    load_align = least_bit_hwi (align_bitpos);
	  for (int i = 0; i < 2; ++i)
	    if (group->load_align[i])
	      {
		align_bitpos
		  = known_alignment (try_bitpos
				     - group->stores[0]->bitpos
				     + group->stores[0]->ops[i].bitpos
				     - group->load_align_base[i]);
		if (align_bitpos & (group_load_align - 1))
		  {
		    unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
		    load_align = MIN (load_align, a);
		  }
	      }
	  try_size = MIN (try_size, load_align);
	}
      store_immediate_info *info
	= find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
      if (info && !gimple_clobber_p (info->stmt))
	{
	  /* If there is just one original statement for the range, see if
	     we can just reuse the original store which could be even larger
	     than try_size.  */
	  unsigned HOST_WIDE_INT stmt_end
	    = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
	  info = find_constituent_stores (group, NULL, &first, try_bitpos,
					  stmt_end - try_bitpos);
	  if (info && info->bitpos >= try_bitpos)
	    {
	      store_immediate_info *info2 = NULL;
	      unsigned int first_copy = first;
	      if (info->bitpos > try_bitpos
		  && stmt_end - try_bitpos <= try_size)
		{
		  info2 = find_constituent_stores (group, NULL, &first_copy,
						   try_bitpos,
						   info->bitpos - try_bitpos);
		  gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
		}
	      if (info2 == NULL && stmt_end - try_bitpos < try_size)
		{
		  info2 = find_constituent_stores (group, NULL, &first_copy,
						   stmt_end,
						   (try_bitpos + try_size)
						   - stmt_end);
		  gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
		}
	      if (info2 == NULL)
		{
		  try_size = stmt_end - try_bitpos;
		  found_orig = true;
		  goto found;
		}
	    }
	}

      /* Approximate store bitsize for the case when there are no padding
	 bits.  */
      while (try_size > size)
	try_size /= 2;
      /* Now look for whole padding bytes at the end of that bitsize.  */
      for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
	if (group->mask[try_pos - bytepos + nonmasked - 1]
	    != (unsigned char) ~0U
	    && (!bzero_first
		|| group->val[try_pos - bytepos + nonmasked - 1] != 0))
	  break;
      if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
	{
	  /* If entire try_size range is padding, skip it.  */
	  try_pos += try_size / BITS_PER_UNIT;
	  size -= try_size;
	  continue;
	}
      /* Otherwise try to decrease try_size if second half, last 3 quarters
	 etc. are padding.  */
      nonmasked *= BITS_PER_UNIT;
      while (nonmasked <= try_size / 2)
	try_size /= 2;
      if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
	{
	  /* Now look for whole padding bytes at the start of that bitsize.  */
	  unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
	  for (masked = 0; masked < try_bytesize; ++masked)
	    if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
		&& (!bzero_first
		    || group->val[try_pos - bytepos + masked] != 0))
	      break;
	  masked *= BITS_PER_UNIT;
	  gcc_assert (masked < try_size);
	  if (masked >= try_size / 2)
	    {
	      while (masked >= try_size / 2)
		{
		  try_size /= 2;
		  try_pos += try_size / BITS_PER_UNIT;
		  size -= try_size;
		  masked -= try_size;
		}
	      /* Need to recompute the alignment, so just retry at the new
		 position.  */
	      continue;
	    }
	}

    found:
      ++ret;

      if (split_stores)
	{
	  split_store *store
	    = new split_store (try_pos, try_size, align);
	  info = find_constituent_stores (group, &store->orig_stores,
					  &first, try_bitpos, try_size);
	  if (info
	      && !gimple_clobber_p (info->stmt)
	      && info->bitpos >= try_bitpos
	      && info->bitpos + info->bitsize <= try_bitpos + try_size
	      && (store->orig_stores.length () == 1
		  || found_orig
		  || (info->bitpos == try_bitpos
		      && (info->bitpos + info->bitsize
			  == try_bitpos + try_size))))
	    {
	      store->orig = true;
	      any_orig = true;
	    }
	  split_stores->safe_push (store);
	}

      try_pos += try_size / BITS_PER_UNIT;
      size -= try_size;
    }

  if (total_orig)
    {
      unsigned int i;
      split_store *store;
      /* If we are reusing some original stores and any of the
	 original SSA_NAMEs had multiple uses, we need to subtract
	 those now before we add the new ones.  */
      if (total_new[0] && any_orig)
	{
	  FOR_EACH_VEC_ELT (*split_stores, i, store)
	    if (store->orig)
	      total_new[0] -= count_multiple_uses (store->orig_stores[0]);
	}
      total_new[0] += ret; /* The new store.  */
      store_immediate_info *info = group->stores[0];
      if (info->ops[0].base_addr)
	total_new[0] += ret;
      if (info->ops[1].base_addr)
	total_new[0] += ret;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_new[0] += ret; /* The new BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      FOR_EACH_VEC_ELT (*split_stores, i, store)
	{
	  unsigned int j;
	  bool bit_not_p[3] = { false, false, false };
	  /* If all orig_stores have certain bit_not_p set, then
	     we'd use a BIT_NOT_EXPR stmt and need to account for it.
	     If some orig_stores have certain bit_not_p set, then
	     we'd use a BIT_XOR_EXPR with a mask and need to account for
	     it.  */
	  FOR_EACH_VEC_ELT (store->orig_stores, j, info)
	    {
	      if (info->ops[0].bit_not_p)
		bit_not_p[0] = true;
	      if (info->ops[1].bit_not_p)
		bit_not_p[1] = true;
	      if (info->bit_not_p)
		bit_not_p[2] = true;
	    }
	  total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
	}
    }

  return ret;
}
/* Return the operation through which the operand IDX (if < 2) or
   result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
   is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
   the bits should be xored with mask.  */

static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
  unsigned int i;
  store_immediate_info *info;
  unsigned int cnt = 0;
  bool any_paddings = false;
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (bit_not_p)
	{
	  ++cnt;
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
	    any_paddings = true;
	}
    }
  mask = NULL_TREE;
  if (cnt == 0)
    return NOP_EXPR;
  if (cnt == split_store->orig_stores.length () && !any_paddings)
    return BIT_NOT_EXPR;

  unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
  unsigned buf_size = split_store->size / BITS_PER_UNIT;
  unsigned char *buf
    = XALLOCAVEC (unsigned char, buf_size);
  memset (buf, ~0U, buf_size);
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (!bit_not_p)
	continue;
      /* Clear regions with bit_not_p and invert afterwards, rather than
	 clear regions with !bit_not_p, so that gaps in between stores aren't
	 set in the mask.  */
      unsigned HOST_WIDE_INT bitsize = info->bitsize;
      unsigned HOST_WIDE_INT prec = bitsize;
      unsigned int pos_in_buffer = 0;
      if (any_paddings)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
	    prec = TYPE_PRECISION (TREE_TYPE (lhs));
	}
      if (info->bitpos < try_bitpos)
	{
	  gcc_assert (info->bitpos + bitsize > try_bitpos);
	  if (!BYTES_BIG_ENDIAN)
	    {
	      if (prec <= try_bitpos - info->bitpos)
		continue;
	      prec -= try_bitpos - info->bitpos;
	    }
	  bitsize -= try_bitpos - info->bitpos;
	  if (BYTES_BIG_ENDIAN && prec > bitsize)
	    prec = bitsize;
	}
      else
	pos_in_buffer = info->bitpos - try_bitpos;
      if (prec < bitsize)
	{
	  /* If this is a bool inversion, invert just the least significant
	     prec bits rather than all bits of it.  */
	  if (BYTES_BIG_ENDIAN)
	    {
	      pos_in_buffer += bitsize - prec;
	      if (pos_in_buffer >= split_store->size)
		continue;
	    }
	  bitsize = prec;
	}
      if (pos_in_buffer + bitsize > split_store->size)
	bitsize = split_store->size - pos_in_buffer;
      unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (p, (BITS_PER_UNIT - 1
				 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
      else
	clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
    }
  for (unsigned int i = 0; i < buf_size; ++i)
    buf[i] = ~buf[i];
  mask = native_interpret_expr (int_type, buf, buf_size);
  return BIT_XOR_EXPR;
}
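/* For example, if a 2-byte split store combines one original store with
   bit_not_p set and another without it, the path above returns
   BIT_XOR_EXPR with MASK holding all-ones exactly in the bits of the
   inverted store, so the xor flips only those bits and leaves the rest
   untouched.  */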
3936 /* Given a merged store group GROUP output the widened version of it.
3937 The store chain is against the base object BASE.
3938 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3939 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3940 Make sure that the number of statements output is less than the number of
3941 original statements. If a better sequence is possible emit it and
3945 imm_store_chain_info::output_merged_store (merged_store_group
*group
)
3947 const unsigned HOST_WIDE_INT start_byte_pos
3948 = group
->bitregion_start
/ BITS_PER_UNIT
;
3949 unsigned int orig_num_stmts
= group
->stores
.length ();
3950 if (orig_num_stmts
< 2)
3953 bool allow_unaligned_store
3954 = !STRICT_ALIGNMENT
&& param_store_merging_allow_unaligned
;
3955 bool allow_unaligned_load
= allow_unaligned_store
;
3956 bool bzero_first
= false;
3957 store_immediate_info
*store
;
3958 unsigned int num_clobber_stmts
= 0;
3959 if (group
->stores
[0]->rhs_code
== INTEGER_CST
)
3962 FOR_EACH_VEC_ELT (group
->stores
, i
, store
)
3963 if (gimple_clobber_p (store
->stmt
))
3964 num_clobber_stmts
++;
3965 else if (TREE_CODE (gimple_assign_rhs1 (store
->stmt
)) == CONSTRUCTOR
3966 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store
->stmt
)) == 0
3967 && group
->start
== store
->bitpos
3968 && group
->width
== store
->bitsize
3969 && (group
->start
% BITS_PER_UNIT
) == 0
3970 && (group
->width
% BITS_PER_UNIT
) == 0)
3977 FOR_EACH_VEC_ELT_FROM (group
->stores
, i
, store
, i
)
3978 if (gimple_clobber_p (store
->stmt
))
3979 num_clobber_stmts
++;
3980 if (num_clobber_stmts
== orig_num_stmts
)
3982 orig_num_stmts
-= num_clobber_stmts
;
3984 if (allow_unaligned_store
|| bzero_first
)
3986 /* If unaligned stores are allowed, see how many stores we'd emit
3987 for unaligned and how many stores we'd emit for aligned stores.
3988 Only use unaligned stores if it allows fewer stores than aligned.
3989 Similarly, if there is a whole region clear first, prefer expanding
3990 it together compared to expanding clear first followed by merged
3992 unsigned cnt
[4] = { ~0U, ~0U, ~0U, ~0U };
3994 for (int pass
= 0; pass
< 4; ++pass
)
3996 if (!allow_unaligned_store
&& (pass
& 1) != 0)
3998 if (!bzero_first
&& (pass
& 2) != 0)
4000 cnt
[pass
] = split_group (group
, (pass
& 1) != 0,
4001 allow_unaligned_load
, (pass
& 2) != 0,
4003 if (cnt
[pass
] < cnt
[pass_min
])
4006 if ((pass_min
& 1) == 0)
4007 allow_unaligned_store
= false;
4008 if ((pass_min
& 2) == 0)
4009 bzero_first
= false;
4012 auto_vec
<class split_store
*, 32> split_stores
;
4013 split_store
*split_store
;
4014 unsigned total_orig
, total_new
, i
;
4015 split_group (group
, allow_unaligned_store
, allow_unaligned_load
, bzero_first
,
4016 &split_stores
, &total_orig
, &total_new
);
4018 /* Determine if there is a clobber covering the whole group at the start,
4019 followed by proposed split stores that cover the whole group. In that
4020 case, prefer the transformation even if
4021 split_stores.length () == orig_num_stmts. */
4022 bool clobber_first
= false;
4023 if (num_clobber_stmts
4024 && gimple_clobber_p (group
->stores
[0]->stmt
)
4025 && group
->start
== group
->stores
[0]->bitpos
4026 && group
->width
== group
->stores
[0]->bitsize
4027 && (group
->start
% BITS_PER_UNIT
) == 0
4028 && (group
->width
% BITS_PER_UNIT
) == 0)
4030 clobber_first
= true;
4031 unsigned HOST_WIDE_INT pos
= group
->start
/ BITS_PER_UNIT
;
4032 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
4033 if (split_store
->bytepos
!= pos
)
4035 clobber_first
= false;
4039 pos
+= split_store
->size
/ BITS_PER_UNIT
;
4040 if (pos
!= (group
->start
+ group
->width
) / BITS_PER_UNIT
)
4041 clobber_first
= false;
  if (split_stores.length () >= orig_num_stmts + clobber_first)
    {
      /* We didn't manage to reduce the number of statements.  Bail out.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Exceeded original number of stmts (%u)."
			    "  Not profitable to emit new sequence.\n",
		 orig_num_stmts);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
	delete split_store;
      return false;
    }
  if (total_orig <= total_new)
    {
      /* If number of estimated new statements is above estimated original
	 statements, bail out too.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Estimated number of original stmts (%u)"
			    " not larger than estimated number of new"
			    " stmts (%u).\n",
		 total_orig, total_new);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
	delete split_store;
      return false;
    }
  if (group->stores[0]->rhs_code == INTEGER_CST)
    {
      bool all_orig = true;
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
	if (!split_store->orig)
	  {
	    all_orig = false;
	    break;
	  }
      if (all_orig)
	{
	  unsigned int cnt = split_stores.length ();
	  store_immediate_info *store;
	  FOR_EACH_VEC_ELT (group->stores, i, store)
	    if (gimple_clobber_p (store->stmt))
	      ++cnt;
	  /* Punt if we wouldn't make any real changes, i.e. keep all
	     orig stmts + all clobbers.  */
	  if (cnt == group->stores.length ())
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Exceeded original number of stmts (%u)."
				    "  Not profitable to emit new sequence.\n",
			 orig_num_stmts);
	      FOR_EACH_VEC_ELT (split_stores, i, split_store)
		delete split_store;
	      return false;
	    }
	}
    }
  gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
  gimple_seq seq = NULL;
  tree last_vdef, new_vuse;
  last_vdef = gimple_vdef (group->last_stmt);
  new_vuse = gimple_vuse (group->last_stmt);
  tree bswap_res = NULL_TREE;

  /* Clobbers are not removed.  */
  if (gimple_clobber_p (group->last_stmt))
    {
      new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
      gimple_set_vdef (group->last_stmt, new_vuse);
    }
  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
      gimple *ins_stmt = group->stores[0]->ins_stmt;
      struct symbolic_number *n = &group->stores[0]->n;
      bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;

      switch (n->range)
	{
	case 16:
	  load_type = bswap_type = uint16_type_node;
	  break;
	case 32:
	  load_type = uint32_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	case 64:
	  load_type = uint64_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	default:
	  gcc_unreachable ();
	}

      /* If the loads have each vuse of the corresponding store,
	 we've checked the aliasing already in try_coalesce_bswap and
	 we want to sink the needed load into seq, so we need to use
	 new_vuse on it.  */
      if (n->vuse == NULL)
	{
	  n->vuse = new_vuse;
	  ins_stmt = NULL;
	}
      else
	/* Update vuse in case it has changed by output_merged_stores.  */
	n->vuse = gimple_vuse (ins_stmt);

      bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
				 bswap_type, load_type, n, bswap);
      gcc_assert (bswap_res);
    }
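
  /* For illustration (example code, not from the sources): a group like

       dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0];

     reaches this point with rhs_code LROTATE_EXPR and is replaced by a
     single 4-byte load of src, byte-swapped via __builtin_bswap32, and
     stored with a single 4-byte store.  */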
  gimple *stmt = NULL;
  auto_vec<gimple *, 32> orig_stmts;
  gimple_seq this_seq;
  tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
				      is_gimple_mem_ref_addr, NULL_TREE);
  gimple_seq_add_seq_without_update (&seq, this_seq);

  tree load_addr[2] = { NULL_TREE, NULL_TREE };
  gimple_seq load_seq[2] = { NULL, NULL };
  gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
  for (int j = 0; j < 2; ++j)
    {
      store_operand_info &op = group->stores[0]->ops[j];
      if (op.base_addr == NULL_TREE)
	continue;

      store_immediate_info *infol = group->stores.last ();
      if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
	{
	  /* We can't pick the location randomly; while we've verified
	     all the loads have the same vuse, they can be still in different
	     basic blocks and we need to pick the one from the last bb:
	     (illustrative reconstruction of the elided example)
	       _1 = q[0]; p[0] = _1;
	       <bb 3>:
	       _2 = q[1]; p[1] = _2;
	     otherwise if we put the wider load at the q[0] load, we might
	     segfault if q[1] is not mapped.  */
	  basic_block bb = gimple_bb (op.stmt);
	  gimple *ostmt = op.stmt;
	  store_immediate_info *info;
	  FOR_EACH_VEC_ELT (group->stores, i, info)
	    {
	      gimple *tstmt = info->ops[j].stmt;
	      basic_block tbb = gimple_bb (tstmt);
	      if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
		{
		  ostmt = tstmt;
		  bb = tbb;
		}
	    }
	  load_gsi[j] = gsi_for_stmt (ostmt);
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &load_seq[j], is_gimple_mem_ref_addr,
				      NULL_TREE);
	}
      else if (operand_equal_p (base_addr, op.base_addr, 0))
	load_addr[j] = addr;
      else
	{
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &this_seq, is_gimple_mem_ref_addr,
				      NULL_TREE);
	  gimple_seq_add_seq_without_update (&seq, this_seq);
	}
    }
  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    {
      const unsigned HOST_WIDE_INT try_size = split_store->size;
      const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
      const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      const unsigned HOST_WIDE_INT try_align = split_store->align;
      const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
      tree dest, src;
      location_t loc;

      if (split_store->orig)
	{
	  /* If there is just a single non-clobber constituent store
	     which covers the whole area, just reuse the lhs and rhs.  */
	  gimple *orig_stmt = NULL;
	  store_immediate_info *store;
	  unsigned int j;
	  FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
	    if (!gimple_clobber_p (store->stmt))
	      {
		orig_stmt = store->stmt;
		break;
	      }
	  dest = gimple_assign_lhs (orig_stmt);
	  src = gimple_assign_rhs1 (orig_stmt);
	  loc = gimple_location (orig_stmt);
	}
      else
	{
	  store_immediate_info *info;
	  unsigned short clique, base;
	  unsigned int k;
	  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	    orig_stmts.safe_push (info->stmt);
	  tree offset_type
	    = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
	  tree dest_type;
	  loc = get_location_for_stmts (orig_stmts);
	  orig_stmts.truncate (0);

	  if (group->string_concatenation)
	    dest_type
	      = build_array_type_nelts (char_type_node,
					try_size / BITS_PER_UNIT);
	  else
	    {
	      dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
	      dest_type = build_aligned_type (dest_type, try_align);
	    }
	  dest = fold_build2 (MEM_REF, dest_type, addr,
			      build_int_cst (offset_type, try_pos));
	  if (TREE_CODE (dest) == MEM_REF)
	    {
	      MR_DEPENDENCE_CLIQUE (dest) = clique;
	      MR_DEPENDENCE_BASE (dest) = base;
	    }
	  tree mask;
	  if (bswap_res || group->string_concatenation)
	    mask = integer_zero_node;
	  else
	    mask = native_interpret_expr (dest_type,
					  group->mask + try_offset,
					  group->buf_size);

	  tree ops[2];
	  for (int j = 0;
	       j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
	       ++j)
	    {
	      store_operand_info &op = split_store->orig_stores[0]->ops[j];
	      if (bswap_res)
		ops[j] = bswap_res;
	      else if (group->string_concatenation)
		{
		  ops[j] = build_string (try_size / BITS_PER_UNIT,
					 (const char *) group->val + try_offset);
		  TREE_TYPE (ops[j]) = dest_type;
		}
	      else if (op.base_addr)
		{
		  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		    orig_stmts.safe_push (info->ops[j].stmt);

		  offset_type = get_alias_type_for_stmts (orig_stmts, true,
							  &clique, &base);
		  location_t load_loc = get_location_for_stmts (orig_stmts);
		  orig_stmts.truncate (0);

		  unsigned HOST_WIDE_INT load_align = group->load_align[j];
		  unsigned HOST_WIDE_INT align_bitpos
		    = known_alignment (try_bitpos
				       - split_store->orig_stores[0]->bitpos
				       + op.bitpos);
		  if (align_bitpos & (load_align - 1))
		    load_align = least_bit_hwi (align_bitpos);

		  tree load_int_type
		    = build_nonstandard_integer_type (try_size, UNSIGNED);
		  load_int_type
		    = build_aligned_type (load_int_type, load_align);

		  poly_uint64 load_pos
		    = exact_div (try_bitpos
				 - split_store->orig_stores[0]->bitpos
				 + op.bitpos,
				 BITS_PER_UNIT);
		  ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
					build_int_cst (offset_type, load_pos));
		  if (TREE_CODE (ops[j]) == MEM_REF)
		    {
		      MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
		      MR_DEPENDENCE_BASE (ops[j]) = base;
		    }
		  if (!integer_zerop (mask))
		    /* The load might load some bits (that will be masked off
		       later on) uninitialized, avoid -W*uninitialized
		       warnings in that case.  */
		    TREE_NO_WARNING (ops[j]) = 1;

		  stmt = gimple_build_assign (make_ssa_name (dest_type),
					      ops[j]);
		  gimple_set_location (stmt, load_loc);
		  if (gsi_bb (load_gsi[j]))
		    {
		      gimple_set_vuse (stmt, gimple_vuse (op.stmt));
		      gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
		    }
		  else
		    {
		      gimple_set_vuse (stmt, new_vuse);
		      gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		  ops[j] = gimple_assign_lhs (stmt);
		  tree xor_mask;
		  enum tree_code inv_op
		    = invert_op (split_store, j, dest_type, xor_mask);
		  if (inv_op != NOP_EXPR)
		    {
		      stmt = gimple_build_assign (make_ssa_name (dest_type),
						  inv_op, ops[j], xor_mask);
		      gimple_set_location (stmt, load_loc);
		      ops[j] = gimple_assign_lhs (stmt);

		      if (gsi_bb (load_gsi[j]))
			gimple_seq_add_stmt_without_update (&load_seq[j],
							    stmt);
		      else
			gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		}
	      else
		ops[j] = native_interpret_expr (dest_type,
						group->val + try_offset,
						group->buf_size);
	    }
	  switch (split_store->orig_stores[0]->rhs_code)
	    {
	    case BIT_AND_EXPR:
	    case BIT_IOR_EXPR:
	    case BIT_XOR_EXPR:
	      FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		{
		  tree rhs1 = gimple_assign_rhs1 (info->stmt);
		  orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
		}
	      location_t bit_loc;
	      bit_loc = get_location_for_stmts (orig_stmts);
	      orig_stmts.truncate (0);

	      stmt
		= gimple_build_assign (make_ssa_name (dest_type),
				       split_store->orig_stores[0]->rhs_code,
				       ops[0], ops[1]);
	      gimple_set_location (stmt, bit_loc);
	      /* If there is just one load and there is a separate
		 load_seq[0], emit the bitwise op right after it.  */
	      if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
	      /* Otherwise, if at least one load is in seq, we need to
		 emit the bitwise op right before the store.  If there
		 are two loads and are emitted somewhere else, it would
		 be better to emit the bitwise op as early as possible;
		 we don't track where that would be possible right now
		 though.  */
	      else
		gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	      tree xor_mask;
	      enum tree_code inv_op;
	      inv_op = invert_op (split_store, 2, dest_type, xor_mask);
	      if (inv_op != NOP_EXPR)
		{
		  stmt = gimple_build_assign (make_ssa_name (dest_type),
					      inv_op, src, xor_mask);
		  gimple_set_location (stmt, bit_loc);
		  if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		    gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
		  else
		    gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    case LROTATE_EXPR:
	    case NOP_EXPR:
	      src = ops[0];
	      if (!is_gimple_val (src))
		{
		  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
					      src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
		{
		  stmt = gimple_build_assign (make_ssa_name (dest_type),
					      NOP_EXPR, src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      inv_op = invert_op (split_store, 2, dest_type, xor_mask);
	      if (inv_op != NOP_EXPR)
		{
		  stmt = gimple_build_assign (make_ssa_name (dest_type),
					      inv_op, src, xor_mask);
		  gimple_set_location (stmt, loc);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    default:
	      src = ops[0];
	      break;
	    }
	  /* If bit insertion is required, we use the source as an accumulator
	     into which the successive bit-field values are manually inserted.
	     FIXME: perhaps use BIT_INSERT_EXPR instead in some cases?  */
	  if (group->bit_insertion)
	    FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	      if (info->rhs_code == BIT_INSERT_EXPR
		  && info->bitpos < try_bitpos + try_size
		  && info->bitpos + info->bitsize > try_bitpos)
		{
		  /* Mask, truncate, convert to final type, shift and ior into
		     the accumulator.  Note that every step can be a no-op.  */
		  const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
		  const HOST_WIDE_INT end_gap
		    = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
		  tree tem = info->ops[0].val;
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
		    {
		      const unsigned HOST_WIDE_INT size
			= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
		      tree integer_type
			= build_nonstandard_integer_type (size, UNSIGNED);
		      tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
					  integer_type, tem);
		    }
		  if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
		    {
		      tree bitfield_type
			= build_nonstandard_integer_type (info->bitsize,
							  UNSIGNED);
		      tem = gimple_convert (&seq, loc, bitfield_type, tem);
		    }
		  else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
		    {
		      const unsigned HOST_WIDE_INT imask
			= (HOST_WIDE_INT_1U << info->bitsize) - 1;
		      tem = gimple_build (&seq, loc,
					  BIT_AND_EXPR, TREE_TYPE (tem), tem,
					  build_int_cst (TREE_TYPE (tem),
							 imask));
		    }
		  const HOST_WIDE_INT shift
		    = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
		  if (shift < 0)
		    tem = gimple_build (&seq, loc,
					RSHIFT_EXPR, TREE_TYPE (tem), tem,
					build_int_cst (NULL_TREE, -shift));
		  tem = gimple_convert (&seq, loc, dest_type, tem);
		  if (shift > 0)
		    tem = gimple_build (&seq, loc,
					LSHIFT_EXPR, dest_type, tem,
					build_int_cst (NULL_TREE, shift));
		  src = gimple_build (&seq, loc,
				      BIT_IOR_EXPR, dest_type, tem, src);
		}
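
	  /* Illustrative sketch (hypothetical bit-field layout): inserting
	     a 6-bit value V at bit position 3 of the accumulator on a
	     little-endian target expands to

	       tmp = V & 0x3f;		// mask to the bit-field width
	       tmp = (uintN) tmp;	// convert to dest_type
	       tmp = tmp << 3;		// shift into place (start_gap)
	       src = src | tmp;		// ior into the accumulator

	     where any step that would be a no-op is elided.  */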
	  if (!integer_zerop (mask))
	    {
	      tree tem = make_ssa_name (dest_type);
	      tree load_src = unshare_expr (dest);
	      /* The load might load some or all bits uninitialized,
		 avoid -W*uninitialized warnings in that case.
		 As an optimization, if all the bits were provably
		 uninitialized (no stores at all yet or a previous
		 store of a CLOBBER), we could optimize away the load
		 and replace it e.g. with 0.  */
	      TREE_NO_WARNING (load_src) = 1;
	      stmt = gimple_build_assign (tem, load_src);
	      gimple_set_location (stmt, loc);
	      gimple_set_vuse (stmt, new_vuse);
	      gimple_seq_add_stmt_without_update (&seq, stmt);

	      /* FIXME: If there is a single chunk of zero bits in mask,
		 perhaps use BIT_INSERT_EXPR instead?  */
	      stmt = gimple_build_assign (make_ssa_name (dest_type),
					  BIT_AND_EXPR, tem, mask);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      tem = gimple_assign_lhs (stmt);

	      if (TREE_CODE (src) == INTEGER_CST)
		src = wide_int_to_tree (dest_type,
					wi::bit_and_not (wi::to_wide (src),
							 wi::to_wide (mask)));
	      else
		{
		  tree nmask
		    = wide_int_to_tree (dest_type,
					wi::bit_not (wi::to_wide (mask)));
		  stmt = gimple_build_assign (make_ssa_name (dest_type),
					      BIT_AND_EXPR, src, nmask);
		  gimple_set_location (stmt, loc);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      stmt = gimple_build_assign (make_ssa_name (dest_type),
					  BIT_IOR_EXPR, tem, src);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	    }
	}
      stmt = gimple_build_assign (dest, src);
      gimple_set_location (stmt, loc);
      gimple_set_vuse (stmt, new_vuse);
      gimple_seq_add_stmt_without_update (&seq, stmt);

      if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
	add_stmt_to_eh_lp (stmt, group->lp_nr);

      tree new_vdef;
      if (i < split_stores.length () - 1)
	new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
      else
	new_vdef = last_vdef;

      gimple_set_vdef (stmt, new_vdef);
      SSA_NAME_DEF_STMT (new_vdef) = stmt;
      new_vuse = new_vdef;
    }
  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    delete split_store;

  gcc_assert (seq);
  if (dump_file)
    {
      fprintf (dump_file,
	       "New sequence of %u stores to replace old one of %u stores\n",
	       split_stores.length (), orig_num_stmts);
      if (dump_flags & TDF_DETAILS)
	print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
    }

  if (gimple_clobber_p (group->last_stmt))
    update_stmt (group->last_stmt);
  if (group->lp_nr > 0)
    {
      /* We're going to insert a sequence of (potentially) throwing stores
	 into an active EH region.  This means that we're going to create
	 new basic blocks with EH edges pointing to the post landing pad
	 and, therefore, to have to update its PHI nodes, if any.  For the
	 virtual PHI node, we're going to use the VDEFs created above, but
	 for the other nodes, we need to record the original reaching defs.  */
      eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
      basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
      basic_block last_bb = gimple_bb (group->last_stmt);
      edge last_edge = find_edge (last_bb, lp_bb);
      auto_vec<tree, 16> last_defs;
      gphi_iterator gpi;
      for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
	{
	  gphi *phi = gpi.phi ();
	  tree last_def;
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    last_def = NULL_TREE;
	  else
	    last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
	  last_defs.safe_push (last_def);
	}

      /* Do the insertion.  Then, if new basic blocks have been created in the
	 process, rewind the chain of VDEFs created above to walk the new basic
	 blocks and update the corresponding arguments of the PHI nodes.  */
      update_modified_stmts (seq);
      if (gimple_find_sub_bbs (seq, &last_gsi))
	while (last_vdef != gimple_vuse (group->last_stmt))
	  {
	    gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
	    if (stmt_could_throw_p (cfun, stmt))
	      {
		edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
		unsigned int i;
		for (gpi = gsi_start_phis (lp_bb), i = 0;
		     !gsi_end_p (gpi);
		     gsi_next (&gpi), i++)
		  {
		    gphi *phi = gpi.phi ();
		    tree new_def;
		    if (virtual_operand_p (gimple_phi_result (phi)))
		      new_def = last_vdef;
		    else
		      new_def = last_defs[i];
		    add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
		  }
	      }
	    last_vdef = gimple_vuse (stmt);
	  }
    }
  else
    gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);

  for (int j = 0; j < 2; ++j)
    if (load_seq[j])
      gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);

  return true;
}
/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

bool
imm_store_chain_info::output_merged_stores ()
{
  unsigned int i;
  merged_store_group *merged_store;
  bool ret = false;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
    {
      if (dbg_cnt (store_merging)
	  && output_merged_store (merged_store))
	{
	  unsigned int j;
	  store_immediate_info *store;
	  FOR_EACH_VEC_ELT (merged_store->stores, j, store)
	    {
	      gimple *stmt = store->stmt;
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      /* Don't remove clobbers, they are still useful even if
		 everything is overwritten afterwards.  */
	      if (gimple_clobber_p (stmt))
		continue;
	      gsi_remove (&gsi, true);
	      if (store->lp_nr)
		remove_stmt_from_eh_lp (stmt);
	      if (stmt != merged_store->last_stmt)
		{
		  unlink_stmt_vdef (stmt);
		  release_defs (stmt);
		}
	    }
	  ret = true;
	}
    }
  if (ret && dump_file)
    fprintf (dump_file, "Merging successful!\n");

  return ret;
}
/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

bool
imm_store_chain_info::terminate_and_process_chain ()
{
  /* Process store chain.  */
  bool ret = false;
  if (m_store_info.length () > 1)
    {
      ret = coalesce_immediate_stores ();
      if (ret)
	ret = output_merged_stores ();
    }

  /* Delete all the entries we allocated ourselves.  */
  store_immediate_info *info;
  unsigned int i;
  FOR_EACH_VEC_ELT (m_store_info, i, info)
    delete info;

  merged_store_group *merged_info;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
    delete merged_info;

  return ret;
}
/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

static bool
lhs_valid_for_store_merging_p (tree lhs)
{
  if (DECL_P (lhs))
    return true;

  switch (TREE_CODE (lhs))
    {
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case BIT_FIELD_REF:
    case COMPONENT_REF:
    case MEM_REF:
    case VIEW_CONVERT_EXPR:
      return true;
    default:
      return false;
    }
}
/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

static bool
rhs_valid_for_store_merging_p (tree rhs)
{
  unsigned HOST_WIDE_INT size;
  if (TREE_CODE (rhs) == CONSTRUCTOR
      && CONSTRUCTOR_NELTS (rhs) == 0
      && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
      && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
    return true;
  return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
	  && native_encode_expr (rhs, NULL, size) != 0);
}
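
/* For example (illustrative, hypothetical user code), an aggregate
   zeroing store such as

     struct S { int a, b; } *p;
     *p = (struct S) {};

   has an empty CONSTRUCTOR on the RHS and is accepted by the special
   case above even though native_encode_expr cannot encode the
   CONSTRUCTOR itself.  */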
/* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
   and return true on success or false on failure.  */

static bool
adjust_bit_pos (poly_offset_int byte_off,
		poly_int64 *pbitpos,
		poly_uint64 *pbitregion_start,
		poly_uint64 *pbitregion_end)
{
  poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
  bit_off += *pbitpos;

  if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
    {
      if (maybe_ne (*pbitregion_end, 0U))
	{
	  bit_off = byte_off << LOG2_BITS_PER_UNIT;
	  bit_off += *pbitregion_start;
	  if (bit_off.to_uhwi (pbitregion_start))
	    {
	      bit_off = byte_off << LOG2_BITS_PER_UNIT;
	      bit_off += *pbitregion_end;
	      if (!bit_off.to_uhwi (pbitregion_end))
		*pbitregion_end = 0;
	    }
	  else
	    *pbitregion_end = 0;
	}
      return true;
    }

  return false;
}
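
/* For instance (worked example), BYTE_OFF 4 adds 4 << LOG2_BITS_PER_UNIT
   = 32 bits to *PBITPOS, and likewise to *PBITREGION_START and
   *PBITREGION_END when a bit region is present; a region bound whose
   adjusted value no longer fits in an unsigned HOST_WIDE_INT is reset
   to 0.  */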
/* If MEM is a memory reference usable for store merging (either as
   store destination or for loads), return the non-NULL base_addr
   and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL, *PBITPOS should be still valid even for that
   case.  */

static tree
mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
			     poly_uint64 *pbitpos,
			     poly_uint64 *pbitregion_start,
			     poly_uint64 *pbitregion_end)
{
  poly_int64 bitsize, bitpos;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  machine_mode mode;
  int unsignedp = 0, reversep = 0, volatilep = 0;
  tree offset;
  tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
					&unsignedp, &reversep, &volatilep);
  *pbitsize = bitsize;
  if (known_eq (bitsize, 0))
    return NULL_TREE;

  if (TREE_CODE (mem) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
    {
      get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
      if (maybe_ne (bitregion_end, 0U))
	bitregion_end += 1;
    }

  if (reversep)
    return NULL_TREE;

  /* We do not want to rewrite TARGET_MEM_REFs.  */
  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    return NULL_TREE;
  /* In some cases get_inner_reference may return a
     MEM_REF [ptr + byteoffset].  For the purposes of this pass
     canonicalize the base_addr to MEM_REF [ptr] and take
     byteoffset into account in the bitpos.  This occurs in
     PR 23684 and this way we can catch more chains.  */
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
			   &bitregion_start, &bitregion_end))
	return NULL_TREE;
      base_addr = TREE_OPERAND (base_addr, 0);
    }
  /* get_inner_reference returns the base object, get at its
     address now.  */
  else
    {
      if (maybe_lt (bitpos, 0))
	return NULL_TREE;
      base_addr = build_fold_addr_expr (base_addr);
    }

  if (offset)
    {
      /* If the access is variable offset then a base decl has to be
	 address-taken to be able to emit pointer-based stores to it.
	 ??? We might be able to get away with re-using the original
	 base up to the first variable part and then wrapping that inside
	 a BIT_FIELD_REF.  */
      tree base = get_base_address (base_addr);
      if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
	return NULL_TREE;

      /* Similarly to above for the base, remove constant from the offset.  */
      if (TREE_CODE (offset) == PLUS_EXPR
	  && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
	  && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
			     &bitpos, &bitregion_start, &bitregion_end))
	offset = TREE_OPERAND (offset, 0);

      base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
			  base_addr, offset);
    }

  if (known_eq (bitregion_end, 0U))
    {
      bitregion_start = round_down_to_byte_boundary (bitpos);
      bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
    }

  *pbitsize = bitsize;
  *pbitpos = bitpos;
  *pbitregion_start = bitregion_start;
  *pbitregion_end = bitregion_end;
  return base_addr;
}
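
/* Illustrative example: for a 32-bit store to MEM_REF [p_1 + 16B] this
   returns p_1 as the base address with *PBITPOS set to 128, so that it
   and a later store to MEM_REF [p_1 + 20B] are recorded against the same
   base and can form one chain.  */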
/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */

static bool
handled_load (gimple *stmt, store_operand_info *op,
	      poly_uint64 bitsize, poly_uint64 bitpos,
	      poly_uint64 bitregion_start, poly_uint64 bitregion_end)
{
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
			   bitregion_start, bitregion_end))
	{
	  /* Don't allow _1 = load; _2 = ~1; _3 = ~_2; which should have
	     been optimized earlier, but if allowed here, would confuse the
	     multiple uses counting.  */
	  if (op->bit_not_p)
	    return false;
	  op->bit_not_p = !op->bit_not_p;
	  return true;
	}
      return false;
    }
  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (cfun, stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
	= mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
				       &op->bitregion_start,
				       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
	  && known_eq (op->bitsize, bitsize)
	  && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
	  && known_ge (op->bitpos - op->bitregion_start,
		       bitpos - bitregion_start)
	  && known_ge (op->bitregion_end - op->bitpos,
		       bitregion_end - bitpos))
	{
	  op->stmt = stmt;
	  op->val = mem;
	  op->bit_not_p = false;
	  return true;
	}
    }
  return false;
}
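
/* E.g. (illustrative) for the GIMPLE sequence

     _1 = *q_2;
     _3 = ~_1;
     *p_4 = _3;

   calling handled_load on the definition of _3 recurses through the
   BIT_NOT_EXPR, fills *OP from the load of *q_2 and sets OP->bit_not_p.  */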
/* Return the index number of the landing pad for STMT, if any.  */

static int
lp_nr_for_store (gimple *stmt)
{
  if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
    return 0;

  if (!stmt_could_throw_p (cfun, stmt))
    return 0;

  return lookup_stmt_eh_lp (stmt);
}
/* Record the store STMT for store merging optimization if it can be
   optimized.  Return true if any changes were made.  */

bool
pass_store_merging::process_store (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  poly_uint64 bitsize, bitpos = 0;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  tree base_addr
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
				   &bitregion_start, &bitregion_end);
  if (known_eq (bitsize, 0U))
    return false;

  bool invalid = (base_addr == NULL_TREE
		  || (maybe_gt (bitsize,
				(unsigned int) MAX_BITSIZE_MODE_ANY_INT)
		      && TREE_CODE (rhs) != INTEGER_CST
		      && (TREE_CODE (rhs) != CONSTRUCTOR
			  || CONSTRUCTOR_NELTS (rhs) != 0)));
  enum tree_code rhs_code = ERROR_MARK;
  bool bit_not_p = false;
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  store_operand_info ops[2];
  if (invalid)
    ;
  else if (TREE_CODE (rhs) == STRING_CST)
    {
      rhs_code = STRING_CST;
      ops[0].val = rhs;
    }
  else if (rhs_valid_for_store_merging_p (rhs))
    {
      rhs_code = INTEGER_CST;
      ops[0].val = rhs;
    }
  else if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
      if (!is_gimple_assign (def_stmt))
	invalid = true;
      else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
			     bitregion_start, bitregion_end))
	rhs_code = MEM_REF;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
	    {
	      bit_not_p = true;
	      def_stmt = SSA_NAME_DEF_STMT (rhs1);
	    }
	}

      if (rhs_code == ERROR_MARK && !invalid)
	switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
	  {
	  case BIT_AND_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	    tree rhs1, rhs2;
	    rhs1 = gimple_assign_rhs1 (def_stmt);
	    rhs2 = gimple_assign_rhs2 (def_stmt);
	    invalid = true;
	    if (TREE_CODE (rhs1) != SSA_NAME)
	      break;
	    def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
	    if (!is_gimple_assign (def_stmt1)
		|| !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
				  bitregion_start, bitregion_end))
	      break;
	    if (rhs_valid_for_store_merging_p (rhs2))
	      ops[1].val = rhs2;
	    else if (TREE_CODE (rhs2) != SSA_NAME)
	      break;
	    else
	      {
		def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
		if (!is_gimple_assign (def_stmt2))
		  break;
		else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
					bitregion_start, bitregion_end))
		  break;
	      }
	    invalid = false;
	    break;
	  default:
	    invalid = true;
	    break;
	  }
      unsigned HOST_WIDE_INT const_bitsize;
      if (bitsize.is_constant (&const_bitsize)
	  && (const_bitsize % BITS_PER_UNIT) == 0
	  && const_bitsize <= 64
	  && multiple_p (bitpos, BITS_PER_UNIT))
	{
	  ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
	  if (ins_stmt)
	    {
	      uint64_t nn = n.n;
	      for (unsigned HOST_WIDE_INT i = 0;
		   i < const_bitsize;
		   i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
		if ((nn & MARKER_MASK) == 0
		    || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
		  {
		    ins_stmt = NULL;
		    break;
		  }
	      if (ins_stmt)
		{
		  if (invalid)
		    {
		      rhs_code = LROTATE_EXPR;
		      ops[0].base_addr = NULL_TREE;
		      ops[1].base_addr = NULL_TREE;
		    }
		  invalid = false;
		}
	    }
	}
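
      /* Illustrative example: for a store such as

	   *(uint32_t *)p = ((uint32_t) b[3] << 24) | ((uint32_t) b[2] << 16)
			    | ((uint32_t) b[1] << 8) | b[0];

	 find_bswap_or_nop_1 gives each stored byte a marker recording which
	 source byte it comes from; a zero or unknown marker above clears
	 ins_stmt and disables the LROTATE_EXPR (bswap) path.  */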
      if (invalid
	  && bitsize.is_constant (&const_bitsize)
	  && ((const_bitsize % BITS_PER_UNIT) != 0
	      || !multiple_p (bitpos, BITS_PER_UNIT))
	  && const_bitsize <= MAX_FIXED_MODE_SIZE)
	{
	  /* Bypass a conversion to the bit-field type.  */
	  if (!bit_not_p
	      && is_gimple_assign (def_stmt)
	      && CONVERT_EXPR_CODE_P (rhs_code))
	    {
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
	      if (TREE_CODE (rhs1) == SSA_NAME
		  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
		rhs = rhs1;
	    }
	  rhs_code = BIT_INSERT_EXPR;
	  ops[0].val = rhs;
	  ops[0].base_addr = NULL_TREE;
	  ops[1].base_addr = NULL_TREE;
	  invalid = false;
	}
    }
  else
    invalid = true;

  unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
  unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
  if (invalid
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
    return terminate_all_aliasing_chains (NULL, stmt);
  if (!ins_stmt)
    memset (&n, 0, sizeof (n));

  class imm_store_chain_info **chain_info = NULL;
  bool ret = false;
  if (base_addr)
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
  if (chain_info)
    {
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (const_bitsize, const_bitpos,
				       const_bitregion_start,
				       const_bitregion_end,
				       stmt, ord, rhs_code, n, ins_stmt,
				       bit_not_p, lp_nr_for_store (stmt),
				       ops[0], ops[1]);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Recording immediate store from stmt:\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	}
      (*chain_info)->m_store_info.safe_push (info);
      ret |= terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain terminate and
	 process the chain now.  */
      if ((*chain_info)->m_store_info.length ()
	  == (unsigned int) param_max_stores_to_merge)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Reached maximum number of statements to merge:\n");
	  ret |= terminate_and_process_chain (*chain_info);
	}
      return ret;
    }

  /* Store aliases any existing chain?  */
  ret |= terminate_all_aliasing_chains (NULL, stmt);
  /* Start a new chain.  */
  class imm_store_chain_info *new_chain
    = new imm_store_chain_info (m_stores_head, base_addr);
  info = new store_immediate_info (const_bitsize, const_bitpos,
				   const_bitregion_start,
				   const_bitregion_end,
				   stmt, 0, rhs_code, n, ins_stmt,
				   bit_not_p, lp_nr_for_store (stmt),
				   ops[0], ops[1]);
  new_chain->m_store_info.safe_push (info);
  m_stores.put (base_addr, new_chain);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Starting new chain with statement:\n");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "The base object is:\n");
      print_generic_expr (dump_file, base_addr);
      fprintf (dump_file, "\n");
    }
  return ret;
}
/* Return true if STMT is a store valid for store merging.  */

static bool
store_valid_for_store_merging_p (gimple *stmt)
{
  return gimple_assign_single_p (stmt)
	 && gimple_vdef (stmt)
	 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
	 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
}

enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
/* Return the status of basic block BB wrt store merging.  */

static enum basic_block_status
get_status_for_store_merging (basic_block bb)
{
  unsigned int num_statements = 0;
  unsigned int num_constructors = 0;
  gimple_stmt_iterator gsi;
  edge e;

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
	break;

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == CONSTRUCTOR)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  if (VECTOR_TYPE_P (TREE_TYPE (rhs))
	      && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
	      && gimple_assign_lhs (stmt) != NULL_TREE)
	    {
	      HOST_WIDE_INT sz
		= int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
	      if (sz == 16 || sz == 32 || sz == 64)
		{
		  num_constructors = 1;
		  break;
		}
	    }
	}
    }

  if (num_statements == 0 && num_constructors == 0)
    return BB_INVALID;

  if (cfun->can_throw_non_call_exceptions && cfun->eh
      && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
      && (e = find_fallthru_edge (bb->succs))
      && e->dest == bb->next_bb)
    return BB_EXTENDED_VALID;

  return (num_statements >= 2 || num_constructors) ? BB_VALID : BB_INVALID;
}
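
/* For example (illustrative), a block containing the two stores

     x.a = 1;
     x.b = 2;

   is BB_VALID, a block with a single store is normally BB_INVALID, and
   with -fnon-call-exceptions a trailing valid store in a block falling
   through to the next one may yield BB_EXTENDED_VALID, so that chains
   can be extended across the edge.  */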
/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the
   widened variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;
  bool changed = false, open_chains = false;

  /* If the function can throw and catch non-call exceptions, we'll be trying
     to merge stores across different basic blocks so we need to first unsplit
     the EH edges in order to streamline the CFG of the function.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh)
    unsplit_eh_edges ();

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      const basic_block_status bb_status = get_status_for_store_merging (bb);
      gimple_stmt_iterator gsi;

      if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
	{
	  changed |= terminate_and_process_all_chains ();
	  open_chains = false;
	}

      if (bb_status == BB_INVALID)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gsi_next (&gsi);

	  if (is_gimple_debug (stmt))
	    continue;

	  if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
	    {
	      /* Terminate all chains.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Volatile access terminates "
				    "all chains\n");
	      changed |= terminate_and_process_all_chains ();
	      open_chains = false;
	      continue;
	    }

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == CONSTRUCTOR
	      && maybe_optimize_vector_constructor (stmt))
	    continue;

	  if (store_valid_for_store_merging_p (stmt))
	    changed |= process_store (stmt);
	  else
	    changed |= terminate_all_aliasing_chains (NULL, stmt);
	}

      if (bb_status == BB_EXTENDED_VALID)
	open_chains = true;
      else
	{
	  changed |= terminate_and_process_all_chains ();
	  open_chains = false;
	}
    }

  if (open_chains)
    changed |= terminate_and_process_all_chains ();

  /* If the function can throw and catch non-call exceptions and something
     changed during the pass, then the CFG has (very likely) changed too.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      return TODO_cleanup_cfg;
    }

  return 0;
}

} // anon namespace
/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}

#if CHECKING_P
namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
	{
	  fprintf (stderr, "Arrays do not match.  X:\n");
	  dump_char_array (stderr, x, n);
	  fprintf (stderr, "Y:\n");
	  dump_char_array (stderr, y, n);
	}
      ASSERT_EQ (x[i], y[i]);
    }
}
/* Test shift_bytes_in_array_left and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_left (void)
{
   /* byte 1   | byte 0
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array_left (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_left (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test shift_bytes_in_array_right and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
   /* byte 0   | byte 1
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0};
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8};
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test clear_bit_region that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}
/* Test clear_bit_region_be that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}
/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array_left ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}

} // namespace selftest
#endif /* CHECKING_P.  */