1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* The purpose of the store merging pass is to combine multiple memory stores
22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
24
25 For example, if we have a sequence performing four byte stores to
26 consecutive memory locations:
27 [p ] := imm1;
28 [p + 1B] := imm2;
29 [p + 2B] := imm3;
30 [p + 3B] := imm4;
31 we can transform this into a single 4-byte store if the target supports it:
32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
33
34 Or:
35 [p ] := [q ];
36 [p + 1B] := [q + 1B];
37 [p + 2B] := [q + 2B];
38 [p + 3B] := [q + 3B];
39 if there is no overlap, this can be transformed into a single 4-byte
40 load followed by a single 4-byte store.
41
42 Or:
43 [p ] := [q ] ^ imm1;
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap, this can be transformed into a single 4-byte
48 load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
49
50 Or:
51 [p:1 ] := imm;
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
55
56 The algorithm is applied to each basic block in three phases:
57
58 1) Scan through the basic block and record assignments to destinations
59 that can be expressed as a store to memory of a certain size at a certain
60 bit offset from base expressions we can handle. For bit-fields we also
61 record the surrounding bit region, i.e. bits that could be stored in
62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
66 These stores can be a result of structure element initializers, array stores
67 etc. A store_immediate_info object is recorded for every such store.
68 Record as many such assignments to a single base as possible until a
69 statement that interferes with the store sequence is encountered.
70 Each store has up to 2 operands, which can be either a constant, a memory
71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in store_operand_info struct.
74
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
76 store_immediate_info objects) and coalesce contiguous stores into
77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous, just their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and the
81 following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
83
84 For example, given the stores:
85 [p ] := 0;
86 [p + 1B] := 1;
87 [p + 3B] := 0;
88 [p + 4B] := 1;
89 [p + 5B] := 0;
90 [p + 6B] := 0;
91 This phase would produce two merged_store_group objects, one recording the
92 two bytes stored in the memory region [p : p + 1] and another
93 recording the four bytes stored in the memory region [p + 3 : p + 6].
94
95 3) The merged_store_group objects produced in phase 2) are processed
96 to generate the sequence of wider stores that set the contiguous memory
97 regions to the sequence of bytes that correspond to it. This may emit
98 multiple stores per store group to handle contiguous stores that are not
99 of a size that is a power of 2. For example it can try to emit a 40-bit
100 store as a 32-bit store followed by an 8-bit store.
101 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
103
104 Note on endianness and example:
105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
106 [p ] := 0x1234;
107 [p + 2B] := 0x5678;
108 [p + 4B] := 0xab;
109 [p + 5B] := 0xcd;
110
111 The memory layout for little-endian (LE) and big-endian (BE) must be:
112 p |LE|BE|
113 ---------
114 0 |34|12|
115 1 |12|34|
116 2 |78|56|
117 3 |56|78|
118 4 |ab|ab|
119 5 |cd|cd|
120
121 To merge these into a single 48-bit merged value 'val' in phase 2)
122 on little-endian we insert stores to higher (consecutive) bitpositions
123 into the most significant bits of the merged value.
124 The final merged value would be: 0xcdab56781234
125
126 For big-endian we insert stores to higher bitpositions into the least
127 significant bits of the merged value.
128 The final merged value would be: 0x12345678abcd
129
130 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
131 followed by a 16-bit store. Again, we must consider endianness when
132 breaking down the 48-bit value 'val' computed above.
133 For little endian we emit:
134 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
135 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
136
137 Whereas for big-endian we emit:
138 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
139 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
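
/* A source-level sketch of the kind of code the pass improves; the
   struct and function names below are made up purely for illustration:

     struct S { unsigned char a, b, c, d; };

     void
     f (struct S *p)
     {
       p->a = 1;
       p->b = 2;
       p->c = 3;
       p->d = 4;
     }

   Assuming the target allows a 32-bit access at this address, the four
   byte stores can typically be merged into a single 32-bit store of the
   constant 0x04030201 on little-endian targets or 0x01020304 on
   big-endian targets.  */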
140
141 #include "config.h"
142 #include "system.h"
143 #include "coretypes.h"
144 #include "backend.h"
145 #include "tree.h"
146 #include "gimple.h"
147 #include "builtins.h"
148 #include "fold-const.h"
149 #include "tree-pass.h"
150 #include "ssa.h"
151 #include "gimple-pretty-print.h"
152 #include "alias.h"
153 #include "fold-const.h"
154 #include "print-tree.h"
155 #include "tree-hash-traits.h"
156 #include "gimple-iterator.h"
157 #include "gimplify.h"
158 #include "gimple-fold.h"
159 #include "stor-layout.h"
160 #include "timevar.h"
161 #include "cfganal.h"
162 #include "cfgcleanup.h"
163 #include "tree-cfg.h"
164 #include "except.h"
165 #include "tree-eh.h"
166 #include "target.h"
167 #include "gimplify-me.h"
168 #include "rtl.h"
169 #include "expr.h" /* For get_bit_range. */
170 #include "optabs-tree.h"
171 #include "dbgcnt.h"
172 #include "selftest.h"
173
174 /* The maximum size (in bits) of the stores this pass should generate. */
175 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
176 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
177
178 /* Limit to bound the number of aliasing checks for loads with the same
179 vuse as the corresponding store. */
180 #define MAX_STORE_ALIAS_CHECKS 64
181
182 namespace {
183
184 struct bswap_stat
185 {
186 /* Number of hand-written 16-bit nop / bswaps found. */
187 int found_16bit;
188
189 /* Number of hand-written 32-bit nop / bswaps found. */
190 int found_32bit;
191
192 /* Number of hand-written 64-bit nop / bswaps found. */
193 int found_64bit;
194 } nop_stats, bswap_stats;
195
196 /* A symbolic number structure is used to detect byte permutation and selection
197 patterns of a source. To achieve that, its field N contains an artificial
198 number consisting of BITS_PER_MARKER sized markers tracking where each
199 byte comes from in the source:
200
201 0 - target byte has the value 0
202 FF - target byte has an unknown value (e.g. due to sign extension)
203 1..size - marker value is the 1-based byte index in the source (1 for the lsb).
204
205 To detect permutations on memory sources (arrays and structures), a symbolic
206 number is also associated:
207 - a base address BASE_ADDR and an OFFSET giving the address of the source;
208 - a range which gives the difference between the highest and lowest accessed
209 memory location to make such a symbolic number;
210 - the address SRC of the source element of lowest address as a convenience
211 to easily get BASE_ADDR + offset + lowest bytepos;
212 - the number N_OPS of expressions bitwise ORed together, which gives an
213 approximate cost of the computation.
214
215 Note 1: the range is different from size as size reflects the size of the
216 type of the current expression. For instance, for an array char a[],
217 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
218 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
219 time a range of 1.
220
221 Note 2: for non-memory sources, range holds the same value as size.
222
223 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
224
225 struct symbolic_number {
226 uint64_t n;
227 tree type;
228 tree base_addr;
229 tree offset;
230 poly_int64_pod bytepos;
231 tree src;
232 tree alias_set;
233 tree vuse;
234 unsigned HOST_WIDE_INT range;
235 int n_ops;
236 };
237
238 #define BITS_PER_MARKER 8
239 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
240 #define MARKER_BYTE_UNKNOWN MARKER_MASK
241 #define HEAD_MARKER(n, size) \
242 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
243
244 /* The number which the find_bswap_or_nop_1 result should match in
245 order to have a nop. The number is masked according to the size of
246 the symbolic number before using it. */
247 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
248 (uint64_t)0x08070605 << 32 | 0x04030201)
249
250 /* The number which the find_bswap_or_nop_1 result should match in
251 order to have a byte swap. The number is masked according to the
252 size of the symbolic number before using it. */
253 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
254 (uint64_t)0x01020304 << 32 | 0x05060708)
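
/* As a worked example of the marker encoding (A and X below are
   hypothetical, A being an array of unsigned char):

     x = (uint32_t) a[0]
         | ((uint32_t) a[1] << 8)
         | ((uint32_t) a[2] << 16)
         | ((uint32_t) a[3] << 24);

   walking this expression produces the symbolic number 0x04030201:
   byte 0 (lsb) of X comes from the lowest-addressed source byte
   (marker 1), byte 1 from the next byte (marker 2), and so on.  This
   matches the 4-byte form of CMPNOP, so on a little-endian target the
   whole expression is just a 32-bit load.  Reversing the shifts

     x = (uint32_t) a[3]
         | ((uint32_t) a[2] << 8)
         | ((uint32_t) a[1] << 16)
         | ((uint32_t) a[0] << 24);

   produces 0x01020304, which matches the 4-byte form of CMPXCHG and is
   therefore recognized as a byte swap on little-endian targets.  */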
255
256 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
257 number N. Return false if the requested operation is not permitted
258 on a symbolic number. */
259
260 inline bool
261 do_shift_rotate (enum tree_code code,
262 struct symbolic_number *n,
263 int count)
264 {
265 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
266 unsigned head_marker;
267
268 if (count < 0
269 || count >= TYPE_PRECISION (n->type)
270 || count % BITS_PER_UNIT != 0)
271 return false;
272 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
273
274 /* Zero out the extra bits of N in order to avoid them being shifted
275 into the significant bits. */
276 if (size < 64 / BITS_PER_MARKER)
277 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
278
279 switch (code)
280 {
281 case LSHIFT_EXPR:
282 n->n <<= count;
283 break;
284 case RSHIFT_EXPR:
285 head_marker = HEAD_MARKER (n->n, size);
286 n->n >>= count;
287 /* Arithmetic shift of signed type: result is dependent on the value. */
288 if (!TYPE_UNSIGNED (n->type) && head_marker)
289 for (i = 0; i < count / BITS_PER_MARKER; i++)
290 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
291 << ((size - 1 - i) * BITS_PER_MARKER);
292 break;
293 case LROTATE_EXPR:
294 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
295 break;
296 case RROTATE_EXPR:
297 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
298 break;
299 default:
300 return false;
301 }
302 /* Zero unused bits for size. */
303 if (size < 64 / BITS_PER_MARKER)
304 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
305 return true;
306 }
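
/* For instance (illustration only): given a 4-byte symbolic number
   0x04030201 describing a value X, do_shift_rotate (LSHIFT_EXPR, n, 8)
   turns it into 0x03020100: byte 0 of X << 8 is known to be zero
   (marker 0), and the remaining markers move up one byte position,
   still recording which source byte each result byte comes from.  */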
307
308 /* Perform sanity checking for the symbolic number N and the gimple
309 statement STMT. */
310
311 inline bool
312 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
313 {
314 tree lhs_type;
315
316 lhs_type = gimple_expr_type (stmt);
317
318 if (TREE_CODE (lhs_type) != INTEGER_TYPE
319 && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
320 return false;
321
322 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
323 return false;
324
325 return true;
326 }
327
328 /* Initialize the symbolic number N for the bswap pass from the base element
329 SRC manipulated by the bitwise OR expression. */
330
331 bool
332 init_symbolic_number (struct symbolic_number *n, tree src)
333 {
334 int size;
335
336 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
337 return false;
338
339 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
340 n->src = src;
341
342 /* Set up the symbolic number N by setting each byte to a value between 1 and
343 the byte size of SRC. The highest order byte is set to that byte size and
344 the lowest order byte to 1. */
345 n->type = TREE_TYPE (src);
346 size = TYPE_PRECISION (n->type);
347 if (size % BITS_PER_UNIT != 0)
348 return false;
349 size /= BITS_PER_UNIT;
350 if (size > 64 / BITS_PER_MARKER)
351 return false;
352 n->range = size;
353 n->n = CMPNOP;
354 n->n_ops = 1;
355
356 if (size < 64 / BITS_PER_MARKER)
357 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
358
359 return true;
360 }
361
362 /* Check if STMT might be a byte swap or a nop from a memory source and return
363 the answer. If so, REF is that memory source and the base of the memory area
364 accessed and the offset of the access from that base are recorded in N. */
365
366 bool
367 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
368 {
369 /* Leaf node is an array or component ref. Memorize its base and
370 offset from base to compare to other such leaf node. */
371 poly_int64 bitsize, bitpos, bytepos;
372 machine_mode mode;
373 int unsignedp, reversep, volatilep;
374 tree offset, base_addr;
375
376 /* Not prepared to handle PDP endian. */
377 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
378 return false;
379
380 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
381 return false;
382
383 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
384 &unsignedp, &reversep, &volatilep);
385
386 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
387 /* Do not rewrite TARGET_MEM_REF. */
388 return false;
389 else if (TREE_CODE (base_addr) == MEM_REF)
390 {
391 poly_offset_int bit_offset = 0;
392 tree off = TREE_OPERAND (base_addr, 1);
393
394 if (!integer_zerop (off))
395 {
396 poly_offset_int boff = mem_ref_offset (base_addr);
397 boff <<= LOG2_BITS_PER_UNIT;
398 bit_offset += boff;
399 }
400
401 base_addr = TREE_OPERAND (base_addr, 0);
402
403 /* Avoid returning a negative bitpos as this may wreak havoc later. */
404 if (maybe_lt (bit_offset, 0))
405 {
406 tree byte_offset = wide_int_to_tree
407 (sizetype, bits_to_bytes_round_down (bit_offset));
408 bit_offset = num_trailing_bits (bit_offset);
409 if (offset)
410 offset = size_binop (PLUS_EXPR, offset, byte_offset);
411 else
412 offset = byte_offset;
413 }
414
415 bitpos += bit_offset.force_shwi ();
416 }
417 else
418 base_addr = build_fold_addr_expr (base_addr);
419
420 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
421 return false;
422 if (!multiple_p (bitsize, BITS_PER_UNIT))
423 return false;
424 if (reversep)
425 return false;
426
427 if (!init_symbolic_number (n, ref))
428 return false;
429 n->base_addr = base_addr;
430 n->offset = offset;
431 n->bytepos = bytepos;
432 n->alias_set = reference_alias_ptr_type (ref);
433 n->vuse = gimple_vuse (stmt);
434 return true;
435 }
436
437 /* Compute the symbolic number N representing the result of a bitwise OR on 2
438 symbolic numbers N1 and N2 whose source statements are respectively
439 SOURCE_STMT1 and SOURCE_STMT2. */
440
441 gimple *
442 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
443 gimple *source_stmt2, struct symbolic_number *n2,
444 struct symbolic_number *n)
445 {
446 int i, size;
447 uint64_t mask;
448 gimple *source_stmt;
449 struct symbolic_number *n_start;
450
451 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
452 if (TREE_CODE (rhs1) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
454 rhs1 = TREE_OPERAND (rhs1, 0);
455 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
456 if (TREE_CODE (rhs2) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
458 rhs2 = TREE_OPERAND (rhs2, 0);
459
460 /* Sources are different, cancel bswap if they are not memory locations with
461 the same base (array, structure, ...). */
462 if (rhs1 != rhs2)
463 {
464 uint64_t inc;
465 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
466 struct symbolic_number *toinc_n_ptr, *n_end;
467 basic_block bb1, bb2;
468
469 if (!n1->base_addr || !n2->base_addr
470 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
471 return NULL;
472
473 if (!n1->offset != !n2->offset
474 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
475 return NULL;
476
477 start1 = 0;
478 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
479 return NULL;
480
481 if (start1 < start2)
482 {
483 n_start = n1;
484 start_sub = start2 - start1;
485 }
486 else
487 {
488 n_start = n2;
489 start_sub = start1 - start2;
490 }
491
492 bb1 = gimple_bb (source_stmt1);
493 bb2 = gimple_bb (source_stmt2);
494 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
495 source_stmt = source_stmt1;
496 else
497 source_stmt = source_stmt2;
498
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1 = start1 + (n1->range - 1);
502 end2 = start2 + (n2->range - 1);
503 if (end1 < end2)
504 {
505 end = end2;
506 end_sub = end2 - end1;
507 }
508 else
509 {
510 end = end1;
511 end_sub = end1 - end2;
512 }
513 n_end = (end2 > end1) ? n2 : n1;
514
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN)
517 toinc_n_ptr = (n_end == n1) ? n2 : n1;
518 else
519 toinc_n_ptr = (n_start == n1) ? n2 : n1;
520
521 n->range = end - MIN (start1, start2) + 1;
522
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n->range > 64 / BITS_PER_MARKER)
526 return NULL;
527
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
531 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
532 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
533 {
534 unsigned marker
535 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
536 if (marker && marker != MARKER_BYTE_UNKNOWN)
537 toinc_n_ptr->n += inc;
538 }
539 }
540 else
541 {
542 n->range = n1->range;
543 n_start = n1;
544 source_stmt = source_stmt1;
545 }
546
547 if (!n1->alias_set
548 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
549 n->alias_set = n1->alias_set;
550 else
551 n->alias_set = ptr_type_node;
552 n->vuse = n_start->vuse;
553 n->base_addr = n_start->base_addr;
554 n->offset = n_start->offset;
555 n->src = n_start->src;
556 n->bytepos = n_start->bytepos;
557 n->type = n_start->type;
558 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
559
560 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
561 {
562 uint64_t masked1, masked2;
563
564 masked1 = n1->n & mask;
565 masked2 = n2->n & mask;
566 if (masked1 && masked2 && masked1 != masked2)
567 return NULL;
568 }
569 n->n = n1->n | n2->n;
570 n->n_ops = n1->n_ops + n2->n_ops;
571
572 return source_stmt;
573 }
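
/* A small illustration (hypothetical array A of unsigned char,
   little-endian target): merging the symbolic numbers of
   (uint32_t) a[0] (markers 0x01, bytepos 0) and (uint32_t) a[1] << 8
   (markers 0x0100, bytepos 1) first rebases the markers of the second
   number against the lowest accessed byte, giving 0x0200, and then ORs
   the two numbers into 0x0201 with a range of 2, i.e. the two low
   bytes of a native-endian read of a[0] and a[1].  */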
574
575 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
576 the operation given by the rhs of STMT on the result. If the operation
577 could successfully be executed, the function returns a gimple stmt whose
578 rhs's first tree is the expression of the source operand, and NULL
579 otherwise. */
580
581 gimple *
582 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
583 {
584 enum tree_code code;
585 tree rhs1, rhs2 = NULL;
586 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
587 enum gimple_rhs_class rhs_class;
588
589 if (!limit || !is_gimple_assign (stmt))
590 return NULL;
591
592 rhs1 = gimple_assign_rhs1 (stmt);
593
594 if (find_bswap_or_nop_load (stmt, rhs1, n))
595 return stmt;
596
597 /* Handle BIT_FIELD_REF. */
598 if (TREE_CODE (rhs1) == BIT_FIELD_REF
599 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
600 {
601 if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
602 || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
603 return NULL;
604
605 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
606 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
607 if (bitpos % BITS_PER_UNIT == 0
608 && bitsize % BITS_PER_UNIT == 0
609 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
610 {
611 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
612 if (BYTES_BIG_ENDIAN)
613 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
614
615 /* Shift. */
616 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
617 return NULL;
618
619 /* Mask. */
620 uint64_t mask = 0;
621 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
622 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
623 i++, tmp <<= BITS_PER_UNIT)
624 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
625 n->n &= mask;
626
627 /* Convert. */
628 n->type = TREE_TYPE (rhs1);
629 if (!n->base_addr)
630 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
631
632 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
633 }
634
635 return NULL;
636 }
637
638 if (TREE_CODE (rhs1) != SSA_NAME)
639 return NULL;
640
641 code = gimple_assign_rhs_code (stmt);
642 rhs_class = gimple_assign_rhs_class (stmt);
643 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
644
645 if (rhs_class == GIMPLE_BINARY_RHS)
646 rhs2 = gimple_assign_rhs2 (stmt);
647
648 /* Handle unary rhs and binary rhs with integer constants as second
649 operand. */
650
651 if (rhs_class == GIMPLE_UNARY_RHS
652 || (rhs_class == GIMPLE_BINARY_RHS
653 && TREE_CODE (rhs2) == INTEGER_CST))
654 {
655 if (code != BIT_AND_EXPR
656 && code != LSHIFT_EXPR
657 && code != RSHIFT_EXPR
658 && code != LROTATE_EXPR
659 && code != RROTATE_EXPR
660 && !CONVERT_EXPR_CODE_P (code))
661 return NULL;
662
663 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
664
665 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
666 we have to initialize the symbolic number. */
667 if (!source_stmt1)
668 {
669 if (gimple_assign_load_p (stmt)
670 || !init_symbolic_number (n, rhs1))
671 return NULL;
672 source_stmt1 = stmt;
673 }
674
675 switch (code)
676 {
677 case BIT_AND_EXPR:
678 {
679 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
680 uint64_t val = int_cst_value (rhs2), mask = 0;
681 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
682
683 /* Only constants masking full bytes are allowed. */
684 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
685 if ((val & tmp) != 0 && (val & tmp) != tmp)
686 return NULL;
687 else if (val & tmp)
688 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
689
690 n->n &= mask;
691 }
692 break;
693 case LSHIFT_EXPR:
694 case RSHIFT_EXPR:
695 case LROTATE_EXPR:
696 case RROTATE_EXPR:
697 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
698 return NULL;
699 break;
700 CASE_CONVERT:
701 {
702 int i, type_size, old_type_size;
703 tree type;
704
705 type = gimple_expr_type (stmt);
706 type_size = TYPE_PRECISION (type);
707 if (type_size % BITS_PER_UNIT != 0)
708 return NULL;
709 type_size /= BITS_PER_UNIT;
710 if (type_size > 64 / BITS_PER_MARKER)
711 return NULL;
712
713 /* Sign extension: result is dependent on the value. */
714 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
715 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
716 && HEAD_MARKER (n->n, old_type_size))
717 for (i = 0; i < type_size - old_type_size; i++)
718 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
719 << ((type_size - 1 - i) * BITS_PER_MARKER);
720
721 if (type_size < 64 / BITS_PER_MARKER)
722 {
723 /* If STMT casts to a smaller type mask out the bits not
724 belonging to the target type. */
725 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
726 }
727 n->type = type;
728 if (!n->base_addr)
729 n->range = type_size;
730 }
731 break;
732 default:
733 return NULL;
734 };
735 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
736 }
737
738 /* Handle binary rhs. */
739
740 if (rhs_class == GIMPLE_BINARY_RHS)
741 {
742 struct symbolic_number n1, n2;
743 gimple *source_stmt, *source_stmt2;
744
745 if (code != BIT_IOR_EXPR)
746 return NULL;
747
748 if (TREE_CODE (rhs2) != SSA_NAME)
749 return NULL;
750
751 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
752
753 switch (code)
754 {
755 case BIT_IOR_EXPR:
756 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
757
758 if (!source_stmt1)
759 return NULL;
760
761 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
762
763 if (!source_stmt2)
764 return NULL;
765
766 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
767 return NULL;
768
769 if (n1.vuse != n2.vuse)
770 return NULL;
771
772 source_stmt
773 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
774
775 if (!source_stmt)
776 return NULL;
777
778 if (!verify_symbolic_number_p (n, stmt))
779 return NULL;
780
781 break;
782 default:
783 return NULL;
784 }
785 return source_stmt;
786 }
787 return NULL;
788 }
789
790 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
791 *CMPXCHG, *CMPNOP and adjust *N. */
792
793 void
794 find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
795 uint64_t *cmpnop)
796 {
797 unsigned rsize;
798 uint64_t tmpn, mask;
799
800 /* The number which the find_bswap_or_nop_1 result should match in order
801 to have a full byte swap. The number is shifted to the right
802 according to the size of the symbolic number before using it. */
803 *cmpxchg = CMPXCHG;
804 *cmpnop = CMPNOP;
805
806 /* Find real size of result (highest non-zero byte). */
807 if (n->base_addr)
808 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
809 else
810 rsize = n->range;
811
812 /* Zero out the bits corresponding to untouched bytes in original gimple
813 expression. */
814 if (n->range < (int) sizeof (int64_t))
815 {
816 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
817 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
818 *cmpnop &= mask;
819 }
820
821 /* Zero out the bits corresponding to unused bytes in the result of the
822 gimple expression. */
823 if (rsize < n->range)
824 {
825 if (BYTES_BIG_ENDIAN)
826 {
827 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
828 *cmpxchg &= mask;
829 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
830 }
831 else
832 {
833 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
834 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
835 *cmpnop &= mask;
836 }
837 n->range = rsize;
838 }
839
840 n->range *= BITS_PER_UNIT;
841 }
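
/* For example, for a 2-byte range in which every byte is used, the
   function above reduces the reference values to *CMPXCHG == 0x0102
   (CMPXCHG shifted right by six unused marker positions) and
   *CMPNOP == 0x0201 (CMPNOP masked to two markers), and N->range
   becomes 16 bits.  */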
842
843 /* Check if STMT completes a bswap implementation or a read in a given
844 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
845 accordingly. It also sets N to represent the kind of operations
846 performed: size of the resulting expression and whether it works on
847 a memory source, and if so alias-set and vuse. Finally, the
848 function returns a stmt whose rhs's first tree is the source
849 expression. */
850
851 gimple *
852 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
853 {
854 /* The last parameter determines the depth search limit. It usually
855 correlates directly to the number n of bytes to be touched. We
856 increase that number by 2 * (log2(n) + 1) here in order to also
857 cover signed -> unsigned conversions of the src operand as can be seen
858 in libgcc, and for initial shift/and operation of the src operand. */
859 int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
860 limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
861 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
862
863 if (!ins_stmt)
864 return NULL;
865
866 uint64_t cmpxchg, cmpnop;
867 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);
868
869 /* A complete byte swap should make the symbolic number start with
870 the largest digit in the highest order byte. An unchanged symbolic
871 number indicates a read with the same endianness as the target architecture. */
872 if (n->n == cmpnop)
873 *bswap = false;
874 else if (n->n == cmpxchg)
875 *bswap = true;
876 else
877 return NULL;
878
879 /* Useless bit manipulation performed by code. */
880 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
881 return NULL;
882
883 return ins_stmt;
884 }
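
/* A typical pattern that the above detects (X is a hypothetical 32-bit
   unsigned variable):

     ((x >> 24) & 0x000000ff)
     | ((x >>  8) & 0x0000ff00)
     | ((x <<  8) & 0x00ff0000)
     | ((x << 24) & 0xff000000)

   Walking the OR tree yields the symbolic number 0x01020304, which
   matches CMPXCHG for a 4-byte range, so the whole expression can be
   replaced by a call to __builtin_bswap32 (x), provided the target has
   a usable bswap implementation.  */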
885
886 const pass_data pass_data_optimize_bswap =
887 {
888 GIMPLE_PASS, /* type */
889 "bswap", /* name */
890 OPTGROUP_NONE, /* optinfo_flags */
891 TV_NONE, /* tv_id */
892 PROP_ssa, /* properties_required */
893 0, /* properties_provided */
894 0, /* properties_destroyed */
895 0, /* todo_flags_start */
896 0, /* todo_flags_finish */
897 };
898
899 class pass_optimize_bswap : public gimple_opt_pass
900 {
901 public:
902 pass_optimize_bswap (gcc::context *ctxt)
903 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
904 {}
905
906 /* opt_pass methods: */
907 virtual bool gate (function *)
908 {
909 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
910 }
911
912 virtual unsigned int execute (function *);
913
914 }; // class pass_optimize_bswap
915
916 /* Perform the bswap optimization: replace the expression computed in the rhs
917 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
918 bswap, load or load + bswap expression.
919 Which of these alternatives replaces the rhs is given by N->base_addr (non
920 null if a load is needed) and BSWAP. The type, VUSE and alias-set of the
921 load to perform are also given in N while the builtin bswap to invoke is
922 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of the
923 load statements involved to construct the rhs in gsi_stmt (GSI) and
924 N->range gives the size of the rhs expression for maintaining some
925 statistics.
926
927 Note that if the replacement involves a load and if gsi_stmt (GSI) is
928 non-NULL, that stmt is moved just after INS_STMT to do the load with the
929 same VUSE, which can lead to gsi_stmt (GSI) changing basic block. */
930
931 tree
932 bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
933 tree bswap_type, tree load_type, struct symbolic_number *n,
934 bool bswap)
935 {
936 tree src, tmp, tgt = NULL_TREE;
937 gimple *bswap_stmt;
938
939 gimple *cur_stmt = gsi_stmt (gsi);
940 src = n->src;
941 if (cur_stmt)
942 tgt = gimple_assign_lhs (cur_stmt);
943
944 /* Need to load the value from memory first. */
945 if (n->base_addr)
946 {
947 gimple_stmt_iterator gsi_ins = gsi;
948 if (ins_stmt)
949 gsi_ins = gsi_for_stmt (ins_stmt);
950 tree addr_expr, addr_tmp, val_expr, val_tmp;
951 tree load_offset_ptr, aligned_load_type;
952 gimple *load_stmt;
953 unsigned align = get_object_alignment (src);
954 poly_int64 load_offset = 0;
955
956 if (cur_stmt)
957 {
958 basic_block ins_bb = gimple_bb (ins_stmt);
959 basic_block cur_bb = gimple_bb (cur_stmt);
960 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
961 return NULL_TREE;
962
963 /* Move cur_stmt just before one of the loads of the original
964 to ensure it has the same VUSE. See PR61517 for what could
965 go wrong. */
966 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
967 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
968 gsi_move_before (&gsi, &gsi_ins);
969 gsi = gsi_for_stmt (cur_stmt);
970 }
971 else
972 gsi = gsi_ins;
973
974 /* Compute address to load from and cast according to the size
975 of the load. */
976 addr_expr = build_fold_addr_expr (src);
977 if (is_gimple_mem_ref_addr (addr_expr))
978 addr_tmp = unshare_expr (addr_expr);
979 else
980 {
981 addr_tmp = unshare_expr (n->base_addr);
982 if (!is_gimple_mem_ref_addr (addr_tmp))
983 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
984 is_gimple_mem_ref_addr,
985 NULL_TREE, true,
986 GSI_SAME_STMT);
987 load_offset = n->bytepos;
988 if (n->offset)
989 {
990 tree off
991 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
992 true, NULL_TREE, true,
993 GSI_SAME_STMT);
994 gimple *stmt
995 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
996 POINTER_PLUS_EXPR, addr_tmp, off);
997 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
998 addr_tmp = gimple_assign_lhs (stmt);
999 }
1000 }
1001
1002 /* Perform the load. */
1003 aligned_load_type = load_type;
1004 if (align < TYPE_ALIGN (load_type))
1005 aligned_load_type = build_aligned_type (load_type, align);
1006 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
1007 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
1008 load_offset_ptr);
1009
1010 if (!bswap)
1011 {
1012 if (n->range == 16)
1013 nop_stats.found_16bit++;
1014 else if (n->range == 32)
1015 nop_stats.found_32bit++;
1016 else
1017 {
1018 gcc_assert (n->range == 64);
1019 nop_stats.found_64bit++;
1020 }
1021
1022 /* Convert the result of load if necessary. */
1023 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
1024 {
1025 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1026 "load_dst");
1027 load_stmt = gimple_build_assign (val_tmp, val_expr);
1028 gimple_set_vuse (load_stmt, n->vuse);
1029 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1030 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
1031 update_stmt (cur_stmt);
1032 }
1033 else if (cur_stmt)
1034 {
1035 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1036 gimple_set_vuse (cur_stmt, n->vuse);
1037 update_stmt (cur_stmt);
1038 }
1039 else
1040 {
1041 tgt = make_ssa_name (load_type);
1042 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1043 gimple_set_vuse (cur_stmt, n->vuse);
1044 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
1045 }
1046
1047 if (dump_file)
1048 {
1049 fprintf (dump_file,
1050 "%d bit load in target endianness found at: ",
1051 (int) n->range);
1052 print_gimple_stmt (dump_file, cur_stmt, 0);
1053 }
1054 return tgt;
1055 }
1056 else
1057 {
1058 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1059 load_stmt = gimple_build_assign (val_tmp, val_expr);
1060 gimple_set_vuse (load_stmt, n->vuse);
1061 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1062 }
1063 src = val_tmp;
1064 }
1065 else if (!bswap)
1066 {
1067 gimple *g = NULL;
1068 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
1069 {
1070 if (!is_gimple_val (src))
1071 return NULL_TREE;
1072 g = gimple_build_assign (tgt, NOP_EXPR, src);
1073 }
1074 else if (cur_stmt)
1075 g = gimple_build_assign (tgt, src);
1076 else
1077 tgt = src;
1078 if (n->range == 16)
1079 nop_stats.found_16bit++;
1080 else if (n->range == 32)
1081 nop_stats.found_32bit++;
1082 else
1083 {
1084 gcc_assert (n->range == 64);
1085 nop_stats.found_64bit++;
1086 }
1087 if (dump_file)
1088 {
1089 fprintf (dump_file,
1090 "%d bit reshuffle in target endianness found at: ",
1091 (int) n->range);
1092 if (cur_stmt)
1093 print_gimple_stmt (dump_file, cur_stmt, 0);
1094 else
1095 {
1096 print_generic_expr (dump_file, tgt, TDF_NONE);
1097 fprintf (dump_file, "\n");
1098 }
1099 }
1100 if (cur_stmt)
1101 gsi_replace (&gsi, g, true);
1102 return tgt;
1103 }
1104 else if (TREE_CODE (src) == BIT_FIELD_REF)
1105 src = TREE_OPERAND (src, 0);
1106
1107 if (n->range == 16)
1108 bswap_stats.found_16bit++;
1109 else if (n->range == 32)
1110 bswap_stats.found_32bit++;
1111 else
1112 {
1113 gcc_assert (n->range == 64);
1114 bswap_stats.found_64bit++;
1115 }
1116
1117 tmp = src;
1118
1119 /* Convert the src expression if necessary. */
1120 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1121 {
1122 gimple *convert_stmt;
1123
1124 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1125 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1126 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1127 }
1128
1129 /* Canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
1130 values are considered, as rotation of 2N-bit values by N bits is generally
1131 not equivalent to a bswap. Consider for instance 0x01020304 r>> 16, which
1132 gives 0x03040102 while a bswap for that value is 0x04030201. */
1133 if (bswap && n->range == 16)
1134 {
1135 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1136 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1137 bswap_stmt = gimple_build_assign (NULL, src);
1138 }
1139 else
1140 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1141
1142 if (tgt == NULL_TREE)
1143 tgt = make_ssa_name (bswap_type);
1144 tmp = tgt;
1145
1146 /* Convert the result if necessary. */
1147 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1148 {
1149 gimple *convert_stmt;
1150
1151 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1152 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
1153 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1154 }
1155
1156 gimple_set_lhs (bswap_stmt, tmp);
1157
1158 if (dump_file)
1159 {
1160 fprintf (dump_file, "%d bit bswap implementation found at: ",
1161 (int) n->range);
1162 if (cur_stmt)
1163 print_gimple_stmt (dump_file, cur_stmt, 0);
1164 else
1165 {
1166 print_generic_expr (dump_file, tgt, TDF_NONE);
1167 fprintf (dump_file, "\n");
1168 }
1169 }
1170
1171 if (cur_stmt)
1172 {
1173 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1174 gsi_remove (&gsi, true);
1175 }
1176 else
1177 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1178 return tgt;
1179 }
1180
1181 /* Find manual byte swap implementations as well as loads in a given
1182 endianness. Byte swaps are turned into a bswap builtin invocation
1183 while endian loads are converted to a bswap builtin invocation or a
1184 simple load according to the target endianness. */
1185
1186 unsigned int
1187 pass_optimize_bswap::execute (function *fun)
1188 {
1189 basic_block bb;
1190 bool bswap32_p, bswap64_p;
1191 bool changed = false;
1192 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1193
1194 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1195 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1196 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1197 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1198 || (bswap32_p && word_mode == SImode)));
1199
1200 /* Determine the argument type of the builtins. The code later on
1201 assumes that the return and argument type are the same. */
1202 if (bswap32_p)
1203 {
1204 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1205 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1206 }
1207
1208 if (bswap64_p)
1209 {
1210 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1211 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1212 }
1213
1214 memset (&nop_stats, 0, sizeof (nop_stats));
1215 memset (&bswap_stats, 0, sizeof (bswap_stats));
1216 calculate_dominance_info (CDI_DOMINATORS);
1217
1218 FOR_EACH_BB_FN (bb, fun)
1219 {
1220 gimple_stmt_iterator gsi;
1221
1222 /* We do a reverse scan for bswap patterns to make sure we get the
1223 widest match. As bswap pattern matching doesn't handle previously
1224 inserted smaller bswap replacements as sub-patterns, the wider
1225 variant wouldn't be detected. */
1226 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1227 {
1228 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1229 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1230 enum tree_code code;
1231 struct symbolic_number n;
1232 bool bswap;
1233
1234 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1235 might be moved to a different basic block by bswap_replace and gsi
1236 must not point to it if that's the case. Moving the gsi_prev
1237 here makes sure that gsi points to the statement previous to
1238 cur_stmt while still making sure that all statements are
1239 considered in this basic block. */
1240 gsi_prev (&gsi);
1241
1242 if (!is_gimple_assign (cur_stmt))
1243 continue;
1244
1245 code = gimple_assign_rhs_code (cur_stmt);
1246 switch (code)
1247 {
1248 case LROTATE_EXPR:
1249 case RROTATE_EXPR:
1250 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1251 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1252 % BITS_PER_UNIT)
1253 continue;
1254 /* Fall through. */
1255 case BIT_IOR_EXPR:
1256 break;
1257 default:
1258 continue;
1259 }
1260
1261 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1262
1263 if (!ins_stmt)
1264 continue;
1265
1266 switch (n.range)
1267 {
1268 case 16:
1269 /* Already in canonical form, nothing to do. */
1270 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1271 continue;
1272 load_type = bswap_type = uint16_type_node;
1273 break;
1274 case 32:
1275 load_type = uint32_type_node;
1276 if (bswap32_p)
1277 {
1278 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1279 bswap_type = bswap32_type;
1280 }
1281 break;
1282 case 64:
1283 load_type = uint64_type_node;
1284 if (bswap64_p)
1285 {
1286 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1287 bswap_type = bswap64_type;
1288 }
1289 break;
1290 default:
1291 continue;
1292 }
1293
1294 if (bswap && !fndecl && n.range != 16)
1295 continue;
1296
1297 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1298 bswap_type, load_type, &n, bswap))
1299 changed = true;
1300 }
1301 }
1302
1303 statistics_counter_event (fun, "16-bit nop implementations found",
1304 nop_stats.found_16bit);
1305 statistics_counter_event (fun, "32-bit nop implementations found",
1306 nop_stats.found_32bit);
1307 statistics_counter_event (fun, "64-bit nop implementations found",
1308 nop_stats.found_64bit);
1309 statistics_counter_event (fun, "16-bit bswap implementations found",
1310 bswap_stats.found_16bit);
1311 statistics_counter_event (fun, "32-bit bswap implementations found",
1312 bswap_stats.found_32bit);
1313 statistics_counter_event (fun, "64-bit bswap implementations found",
1314 bswap_stats.found_64bit);
1315
1316 return (changed ? TODO_update_ssa : 0);
1317 }
1318
1319 } // anon namespace
1320
1321 gimple_opt_pass *
1322 make_pass_optimize_bswap (gcc::context *ctxt)
1323 {
1324 return new pass_optimize_bswap (ctxt);
1325 }
1326
1327 namespace {
1328
1329 /* Struct recording one operand for the store, which is either a constant,
1330 then VAL represents the constant and all the other fields are zero, or
1331 a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1332 and the other fields also reflect the memory load, or an SSA name, then
1333 VAL represents the SSA name and all the other fields are zero. */
1334
1335 class store_operand_info
1336 {
1337 public:
1338 tree val;
1339 tree base_addr;
1340 poly_uint64 bitsize;
1341 poly_uint64 bitpos;
1342 poly_uint64 bitregion_start;
1343 poly_uint64 bitregion_end;
1344 gimple *stmt;
1345 bool bit_not_p;
1346 store_operand_info ();
1347 };
1348
1349 store_operand_info::store_operand_info ()
1350 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
1351 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1352 {
1353 }
1354
1355 /* Struct recording the information about a single store of an immediate
1356 to memory. These are created in the first phase and coalesced into
1357 merged_store_group objects in the second phase. */
1358
1359 class store_immediate_info
1360 {
1361 public:
1362 unsigned HOST_WIDE_INT bitsize;
1363 unsigned HOST_WIDE_INT bitpos;
1364 unsigned HOST_WIDE_INT bitregion_start;
1365 /* This is one past the last bit of the bit region. */
1366 unsigned HOST_WIDE_INT bitregion_end;
1367 gimple *stmt;
1368 unsigned int order;
1369 /* INTEGER_CST for constant stores, MEM_REF for memory copy,
1370 BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
1371 for bit insertion.
1372 LROTATE_EXPR if it can be only bswap optimized and
1373 ops are not really meaningful.
1374 NOP_EXPR if bswap optimization detected identity, ops
1375 are not meaningful. */
1376 enum tree_code rhs_code;
1377 /* Two fields for bswap optimization purposes. */
1378 struct symbolic_number n;
1379 gimple *ins_stmt;
1380 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1381 bool bit_not_p;
1382 /* True if ops have been swapped and thus ops[1] represents
1383 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1384 bool ops_swapped_p;
1385 /* The index number of the landing pad, or 0 if there is none. */
1386 int lp_nr;
1387 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1388 just the first one. */
1389 store_operand_info ops[2];
1390 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1391 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1392 gimple *, unsigned int, enum tree_code,
1393 struct symbolic_number &, gimple *, bool, int,
1394 const store_operand_info &,
1395 const store_operand_info &);
1396 };
1397
1398 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
1399 unsigned HOST_WIDE_INT bp,
1400 unsigned HOST_WIDE_INT brs,
1401 unsigned HOST_WIDE_INT bre,
1402 gimple *st,
1403 unsigned int ord,
1404 enum tree_code rhscode,
1405 struct symbolic_number &nr,
1406 gimple *ins_stmtp,
1407 bool bitnotp,
1408 int nr2,
1409 const store_operand_info &op0r,
1410 const store_operand_info &op1r)
1411 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
1412 stmt (st), order (ord), rhs_code (rhscode), n (nr),
1413 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
1414 lp_nr (nr2)
1415 #if __cplusplus >= 201103L
1416 , ops { op0r, op1r }
1417 {
1418 }
1419 #else
1420 {
1421 ops[0] = op0r;
1422 ops[1] = op1r;
1423 }
1424 #endif
1425
1426 /* Struct representing a group of stores to contiguous memory locations.
1427 These are produced by the second phase (coalescing) and consumed in the
1428 third phase that outputs the widened stores. */
1429
1430 class merged_store_group
1431 {
1432 public:
1433 unsigned HOST_WIDE_INT start;
1434 unsigned HOST_WIDE_INT width;
1435 unsigned HOST_WIDE_INT bitregion_start;
1436 unsigned HOST_WIDE_INT bitregion_end;
1437 /* The size of the allocated memory for val and mask. */
1438 unsigned HOST_WIDE_INT buf_size;
1439 unsigned HOST_WIDE_INT align_base;
1440 poly_uint64 load_align_base[2];
1441
1442 unsigned int align;
1443 unsigned int load_align[2];
1444 unsigned int first_order;
1445 unsigned int last_order;
1446 bool bit_insertion;
1447 bool only_constants;
1448 unsigned int first_nonmergeable_order;
1449 int lp_nr;
1450
1451 auto_vec<store_immediate_info *> stores;
1452 /* We record the first and last original statements in the sequence because
1453 we'll need their vuse/vdef and replacement position. It's easier to keep
1454 track of them separately as 'stores' is reordered by apply_stores. */
1455 gimple *last_stmt;
1456 gimple *first_stmt;
1457 unsigned char *val;
1458 unsigned char *mask;
1459
1460 merged_store_group (store_immediate_info *);
1461 ~merged_store_group ();
1462 bool can_be_merged_into (store_immediate_info *);
1463 void merge_into (store_immediate_info *);
1464 void merge_overlapping (store_immediate_info *);
1465 bool apply_stores ();
1466 private:
1467 void do_merge (store_immediate_info *);
1468 };
1469
1470 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1471
1472 static void
1473 dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1474 {
1475 if (!fd)
1476 return;
1477
1478 for (unsigned int i = 0; i < len; i++)
1479 fprintf (fd, "%02x ", ptr[i]);
1480 fprintf (fd, "\n");
1481 }
1482
1483 /* Clear out LEN bits starting from bit START in the byte array
1484 PTR. This clears the bits to the *right* of START.
1485 START must be within [0, BITS_PER_UNIT) and counts starting from
1486 the least significant bit. */
1487
1488 static void
1489 clear_bit_region_be (unsigned char *ptr, unsigned int start,
1490 unsigned int len)
1491 {
1492 if (len == 0)
1493 return;
1494 /* Clear len bits to the right of start. */
1495 else if (len <= start + 1)
1496 {
1497 unsigned char mask = (~(~0U << len));
1498 mask = mask << (start + 1U - len);
1499 ptr[0] &= ~mask;
1500 }
1501 else if (start != BITS_PER_UNIT - 1)
1502 {
1503 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1504 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1505 len - (start % BITS_PER_UNIT) - 1);
1506 }
1507 else if (start == BITS_PER_UNIT - 1
1508 && len > BITS_PER_UNIT)
1509 {
1510 unsigned int nbytes = len / BITS_PER_UNIT;
1511 memset (ptr, 0, nbytes);
1512 if (len % BITS_PER_UNIT != 0)
1513 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1514 len % BITS_PER_UNIT);
1515 }
1516 else
1517 gcc_unreachable ();
1518 }
1519
1520 /* In the byte array PTR clear the bit region starting at bit
1521 START that is LEN bits wide.
1522 For regions spanning multiple bytes do this recursively until we reach
1523 zero LEN or a region contained within a single byte. */
1524
1525 static void
1526 clear_bit_region (unsigned char *ptr, unsigned int start,
1527 unsigned int len)
1528 {
1529 /* Degenerate base case. */
1530 if (len == 0)
1531 return;
1532 else if (start >= BITS_PER_UNIT)
1533 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1534 /* Second base case. */
1535 else if ((start + len) <= BITS_PER_UNIT)
1536 {
1537 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
1538 mask >>= BITS_PER_UNIT - (start + len);
1539
1540 ptr[0] &= ~mask;
1541
1542 return;
1543 }
1544 /* Clear most significant bits in a byte and proceed with the next byte. */
1545 else if (start != 0)
1546 {
1547 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1548 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
1549 }
1550 /* Whole bytes need to be cleared. */
1551 else if (start == 0 && len > BITS_PER_UNIT)
1552 {
1553 unsigned int nbytes = len / BITS_PER_UNIT;
1554 /* We could recurse on each byte but we clear whole bytes, so a simple
1555 memset will do. */
1556 memset (ptr, '\0', nbytes);
1557 /* Clear the remaining sub-byte region if there is one. */
1558 if (len % BITS_PER_UNIT != 0)
1559 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1560 }
1561 else
1562 gcc_unreachable ();
1563 }
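
/* Small examples of the two helpers above (both take START counted from
   the least significant bit of the byte): clear_bit_region (ptr, 2, 3)
   clears bits 2, 3 and 4 of ptr[0], i.e. ptr[0] &= ~0x1c, growing
   towards the more significant bits, whereas clear_bit_region_be
   (ptr, 4, 3) clears bits 4, 3 and 2, i.e. the same mask, growing
   towards the less significant bits.  */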
1564
1565 /* Write BITLEN bits of EXPR to the byte array PTR at
1566 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1567 Return true if the operation succeeded. */
1568
1569 static bool
1570 encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
1571 unsigned int total_bytes)
1572 {
1573 unsigned int first_byte = bitpos / BITS_PER_UNIT;
1574 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1575 || (bitpos % BITS_PER_UNIT)
1576 || !int_mode_for_size (bitlen, 0).exists ());
1577 bool empty_ctor_p
1578 = (TREE_CODE (expr) == CONSTRUCTOR
1579 && CONSTRUCTOR_NELTS (expr) == 0
1580 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1581 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
1582
1583 if (!sub_byte_op_p)
1584 {
1585 if (first_byte >= total_bytes)
1586 return false;
1587 total_bytes -= first_byte;
1588 if (empty_ctor_p)
1589 {
1590 unsigned HOST_WIDE_INT rhs_bytes
1591 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1592 if (rhs_bytes > total_bytes)
1593 return false;
1594 memset (ptr + first_byte, '\0', rhs_bytes);
1595 return true;
1596 }
1597 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1598 }
1599
1600 /* LITTLE-ENDIAN
1601 We are writing a non byte-sized quantity or at a position that is not
1602 at a byte boundary.
1603 |--------|--------|--------| ptr + first_byte
1604 ^ ^
1605 xxx xxxxxxxx xxx< bp>
1606 |______EXPR____|
1607
1608 First native_encode_expr EXPR into a temporary buffer and shift each
1609 byte in the buffer by 'bp' (carrying the bits over as necessary).
1610 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1611 <------bitlen---->< bp>
1612 Then we clear the destination bits:
1613 |---00000|00000000|000-----| ptr + first_byte
1614 <-------bitlen--->< bp>
1615
1616 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1617 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1618
1619 BIG-ENDIAN
1620 We are writing a non byte-sized quantity or at a position that is not
1621 at a byte boundary.
1622 ptr + first_byte |--------|--------|--------|
1623 ^ ^
1624 <bp >xxx xxxxxxxx xxx
1625 |_____EXPR_____|
1626
1627 First native_encode_expr EXPR into a temporary buffer and shift each
1628 byte in the buffer to the right (carrying the bits over as necessary).
1629 We shift by as much as needed to align the most significant bit of EXPR
1630 with bitpos:
1631 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1632 <---bitlen----> <bp ><-----bitlen----->
1633 Then we clear the destination bits:
1634 ptr + first_byte |-----000||00000000||00000---|
1635 <bp ><-------bitlen----->
1636
1637 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1638 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1639 The awkwardness comes from the fact that bitpos is counted from the
1640 most significant bit of a byte. */
1641
1642 /* We must be dealing with fixed-size data at this point, since the
1643 total size is also fixed. */
1644 unsigned int byte_size;
1645 if (empty_ctor_p)
1646 {
1647 unsigned HOST_WIDE_INT rhs_bytes
1648 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1649 if (rhs_bytes > total_bytes)
1650 return false;
1651 byte_size = rhs_bytes;
1652 }
1653 else
1654 {
1655 fixed_size_mode mode
1656 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
1657 byte_size = GET_MODE_SIZE (mode);
1658 }
1659 /* Allocate an extra byte so that we have space to shift into. */
1660 byte_size++;
1661 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
1662 memset (tmpbuf, '\0', byte_size);
1663 /* The store detection code should only have allowed constants that are
1664 accepted by native_encode_expr or empty ctors. */
1665 if (!empty_ctor_p
1666 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
1667 gcc_unreachable ();
1668
1669 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1670 bytes to write. This means it can write more than
1671 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1672 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1673 bitlen and zero out the bits that are not relevant as well (that may
1674 contain a sign bit due to sign-extension). */
1675 unsigned int padding
1676 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
1677 /* On big-endian the padding is at the 'front' so just skip the initial
1678 bytes. */
1679 if (BYTES_BIG_ENDIAN)
1680 tmpbuf += padding;
1681
1682 byte_size -= padding;
1683
1684 if (bitlen % BITS_PER_UNIT != 0)
1685 {
1686 if (BYTES_BIG_ENDIAN)
1687 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1688 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1689 else
1690 clear_bit_region (tmpbuf, bitlen,
1691 byte_size * BITS_PER_UNIT - bitlen);
1692 }
1693 /* Left shifting relies on the last byte being clear if bitlen is
1694 a multiple of BITS_PER_UNIT, which might not be clear if
1695 there are padding bytes. */
1696 else if (!BYTES_BIG_ENDIAN)
1697 tmpbuf[byte_size - 1] = '\0';
1698
1699 /* Clear the bit region in PTR where the bits from TMPBUF will be
1700 inserted into. */
1701 if (BYTES_BIG_ENDIAN)
1702 clear_bit_region_be (ptr + first_byte,
1703 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1704 else
1705 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1706
1707 int shift_amnt;
1708 int bitlen_mod = bitlen % BITS_PER_UNIT;
1709 int bitpos_mod = bitpos % BITS_PER_UNIT;
1710
1711 bool skip_byte = false;
1712 if (BYTES_BIG_ENDIAN)
1713 {
1714 /* BITPOS and BITLEN are exactly aligned and no shifting
1715 is necessary. */
1716 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1717 || (bitpos_mod == 0 && bitlen_mod == 0))
1718 shift_amnt = 0;
1719 /* |. . . . . . . .|
1720 <bp > <blen >.
1721 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1722 of the value until it aligns with 'bp' in the next byte over. */
1723 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1724 {
1725 shift_amnt = bitlen_mod + bitpos_mod;
1726 skip_byte = bitlen_mod != 0;
1727 }
1728 /* |. . . . . . . .|
1729 <----bp--->
1730 <---blen---->.
1731 Shift the value right within the same byte so it aligns with 'bp'. */
1732 else
1733 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1734 }
1735 else
1736 shift_amnt = bitpos % BITS_PER_UNIT;
1737
1738 /* Create the shifted version of EXPR. */
1739 if (!BYTES_BIG_ENDIAN)
1740 {
1741 shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
1742 if (shift_amnt == 0)
1743 byte_size--;
1744 }
1745 else
1746 {
1747 gcc_assert (BYTES_BIG_ENDIAN);
1748 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
1749 /* If shifting right forced us to move into the next byte skip the now
1750 empty byte. */
1751 if (skip_byte)
1752 {
1753 tmpbuf++;
1754 byte_size--;
1755 }
1756 }
1757
1758 /* Insert the bits from TMPBUF. */
1759 for (unsigned int i = 0; i < byte_size; i++)
1760 ptr[first_byte + i] |= tmpbuf[i];
1761
1762 return true;
1763 }
1764
1765 /* Sorting function for store_immediate_info objects.
1766 Sorts them by bitposition. */
1767
1768 static int
1769 sort_by_bitpos (const void *x, const void *y)
1770 {
1771 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1772 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1773
1774 if ((*tmp)->bitpos < (*tmp2)->bitpos)
1775 return -1;
1776 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1777 return 1;
1778 else
1779 /* If they are the same let's use the order which is guaranteed to
1780 be different. */
1781 return (*tmp)->order - (*tmp2)->order;
1782 }
1783
1784 /* Sorting function for store_immediate_info objects.
1785 Sorts them by the order field. */
1786
1787 static int
1788 sort_by_order (const void *x, const void *y)
1789 {
1790 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1791 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1792
1793 if ((*tmp)->order < (*tmp2)->order)
1794 return -1;
1795 else if ((*tmp)->order > (*tmp2)->order)
1796 return 1;
1797
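/* The order fields of two different stores in a chain are always distinct
   (see the comment in sort_by_bitpos above), so they can never compare
   equal here.  */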
1798 gcc_unreachable ();
1799 }
1800
1801 /* Initialize a merged_store_group object from a store_immediate_info
1802 object. */
1803
1804 merged_store_group::merged_store_group (store_immediate_info *info)
1805 {
1806 start = info->bitpos;
1807 width = info->bitsize;
1808 bitregion_start = info->bitregion_start;
1809 bitregion_end = info->bitregion_end;
1810 /* VAL has memory allocated for it in apply_stores once the group
1811 width has been finalized. */
1812 val = NULL;
1813 mask = NULL;
1814 bit_insertion = false;
1815 only_constants = info->rhs_code == INTEGER_CST;
1816 first_nonmergeable_order = ~0U;
1817 lp_nr = info->lp_nr;
1818 unsigned HOST_WIDE_INT align_bitpos = 0;
1819 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1820 &align, &align_bitpos);
1821 align_base = start - align_bitpos;
1822 for (int i = 0; i < 2; ++i)
1823 {
1824 store_operand_info &op = info->ops[i];
1825 if (op.base_addr == NULL_TREE)
1826 {
1827 load_align[i] = 0;
1828 load_align_base[i] = 0;
1829 }
1830 else
1831 {
1832 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
1833 load_align_base[i] = op.bitpos - align_bitpos;
1834 }
1835 }
1836 stores.create (1);
1837 stores.safe_push (info);
1838 last_stmt = info->stmt;
1839 last_order = info->order;
1840 first_stmt = last_stmt;
1841 first_order = last_order;
1842 buf_size = 0;
1843 }
1844
1845 merged_store_group::~merged_store_group ()
1846 {
1847 if (val)
1848 XDELETEVEC (val);
1849 }
1850
1851 /* Return true if the store described by INFO can be merged into the group. */
1852
1853 bool
1854 merged_store_group::can_be_merged_into (store_immediate_info *info)
1855 {
1856 /* Do not merge bswap patterns. */
1857 if (info->rhs_code == LROTATE_EXPR)
1858 return false;
1859
1860 if (info->lp_nr != lp_nr)
1861 return false;
1862
1863 /* The canonical case. */
1864 if (info->rhs_code == stores[0]->rhs_code)
1865 return true;
1866
1867 /* BIT_INSERT_EXPR is compatible with INTEGER_CST. */
1868 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
1869 return true;
1870
1871 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
1872 return true;
1873
1874 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
1875 only for small regions since this can generate a lot of instructions. */
1876 if (info->rhs_code == MEM_REF
1877 && (stores[0]->rhs_code == INTEGER_CST
1878 || stores[0]->rhs_code == BIT_INSERT_EXPR)
1879 && info->bitregion_start == stores[0]->bitregion_start
1880 && info->bitregion_end == stores[0]->bitregion_end
1881 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
1882 return true;
1883
1884 if (stores[0]->rhs_code == MEM_REF
1885 && (info->rhs_code == INTEGER_CST
1886 || info->rhs_code == BIT_INSERT_EXPR)
1887 && info->bitregion_start == stores[0]->bitregion_start
1888 && info->bitregion_end == stores[0]->bitregion_end
1889 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
1890 return true;
1891
1892 return false;
1893 }
1894
1895 /* Helper method for merge_into and merge_overlapping to do
1896 the common part. */
1897
1898 void
1899 merged_store_group::do_merge (store_immediate_info *info)
1900 {
1901 bitregion_start = MIN (bitregion_start, info->bitregion_start);
1902 bitregion_end = MAX (bitregion_end, info->bitregion_end);
1903
1904 unsigned int this_align;
1905 unsigned HOST_WIDE_INT align_bitpos = 0;
1906 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1907 &this_align, &align_bitpos);
1908 if (this_align > align)
1909 {
1910 align = this_align;
1911 align_base = info->bitpos - align_bitpos;
1912 }
1913 for (int i = 0; i < 2; ++i)
1914 {
1915 store_operand_info &op = info->ops[i];
1916 if (!op.base_addr)
1917 continue;
1918
1919 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
1920 if (this_align > load_align[i])
1921 {
1922 load_align[i] = this_align;
1923 load_align_base[i] = op.bitpos - align_bitpos;
1924 }
1925 }
1926
1927 gimple *stmt = info->stmt;
1928 stores.safe_push (info);
1929 if (info->order > last_order)
1930 {
1931 last_order = info->order;
1932 last_stmt = stmt;
1933 }
1934 else if (info->order < first_order)
1935 {
1936 first_order = info->order;
1937 first_stmt = stmt;
1938 }
1939 if (info->rhs_code != INTEGER_CST)
1940 only_constants = false;
1941 }
1942
1943 /* Merge a store recorded by INFO into this merged store.
1944 The store is not overlapping with the existing recorded
1945 stores. */
1946
1947 void
1948 merged_store_group::merge_into (store_immediate_info *info)
1949 {
1950 /* Make sure we're inserting in the position we think we're inserting. */
1951 gcc_assert (info->bitpos >= start + width
1952 && info->bitregion_start <= bitregion_end);
1953
1954 width = info->bitpos + info->bitsize - start;
1955 do_merge (info);
1956 }
1957
1958 /* Merge a store described by INFO into this merged store.
1959 INFO overlaps in some way with the current store (i.e. it's not contiguous
1960 which is handled by merged_store_group::merge_into). */
1961
1962 void
1963 merged_store_group::merge_overlapping (store_immediate_info *info)
1964 {
1965 /* If the store extends the size of the group, extend the width. */
1966 if (info->bitpos + info->bitsize > start + width)
1967 width = info->bitpos + info->bitsize - start;
1968
1969 do_merge (info);
1970 }
1971
1972 /* Go through all the recorded stores in this group in program order and
1973 apply their values to the VAL byte array to create the final merged
1974 value. Return true if the operation succeeded. */
1975
1976 bool
1977 merged_store_group::apply_stores ()
1978 {
1979 /* Make sure the bit region boundaries are byte-aligned and that we have
1980 more than one store in the group, otherwise we cannot merge anything. */
1981 if (bitregion_start % BITS_PER_UNIT != 0
1982 || bitregion_end % BITS_PER_UNIT != 0
1983 || stores.length () == 1)
1984 return false;
1985
1986 stores.qsort (sort_by_order);
1987 store_immediate_info *info;
1988 unsigned int i;
1989 /* Create a power-of-2-sized buffer for native_encode_expr. */
1990 buf_size = 1 << ceil_log2 ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
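/* For example, a 48-bit (6-byte) bit region gets a buf_size of 8 bytes;
   VAL and MASK below share a single allocation of 2 * buf_size bytes.  */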
1991 val = XNEWVEC (unsigned char, 2 * buf_size);
1992 mask = val + buf_size;
1993 memset (val, 0, buf_size);
1994 memset (mask, ~0U, buf_size);
1995
1996 FOR_EACH_VEC_ELT (stores, i, info)
1997 {
1998 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
1999 tree cst;
2000 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2001 cst = info->ops[0].val;
2002 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2003 cst = info->ops[1].val;
2004 else
2005 cst = NULL_TREE;
2006 bool ret = true;
2007 if (cst)
2008 {
2009 if (info->rhs_code == BIT_INSERT_EXPR)
2010 bit_insertion = true;
2011 else
2012 ret = encode_tree_to_bitpos (cst, val, info->bitsize,
2013 pos_in_buffer, buf_size);
2014 }
2015 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2016 if (BYTES_BIG_ENDIAN)
2017 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2018 - (pos_in_buffer % BITS_PER_UNIT)),
2019 info->bitsize);
2020 else
2021 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
2022 if (cst && dump_file && (dump_flags & TDF_DETAILS))
2023 {
2024 if (ret)
2025 {
2026 fputs ("After writing ", dump_file);
2027 print_generic_expr (dump_file, cst, TDF_NONE);
2028 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
2029 " at position %d\n", info->bitsize, pos_in_buffer);
2030 fputs (" the merged value contains ", dump_file);
2031 dump_char_array (dump_file, val, buf_size);
2032 fputs (" the merged mask contains ", dump_file);
2033 dump_char_array (dump_file, mask, buf_size);
2034 if (bit_insertion)
2035 fputs (" bit insertion is required\n", dump_file);
2036 }
2037 else
2038 fprintf (dump_file, "Failed to merge stores\n");
2039 }
2040 if (!ret)
2041 return false;
2042 }
2043 stores.qsort (sort_by_bitpos);
2044 return true;
2045 }
2046
2047 /* Structure describing the store chain. */
2048
2049 class imm_store_chain_info
2050 {
2051 public:
2052 /* Doubly-linked list that imposes an order on chain processing.
2053 PNXP (prev's next pointer) points to the head of a list, or to
2054 the next field in the previous chain in the list.
2055 See pass_store_merging::m_stores_head for more rationale. */
2056 imm_store_chain_info *next, **pnxp;
2057 tree base_addr;
2058 auto_vec<store_immediate_info *> m_store_info;
2059 auto_vec<merged_store_group *> m_merged_store_groups;
2060
2061 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2062 : next (inspt), pnxp (&inspt), base_addr (b_a)
2063 {
2064 inspt = this;
2065 if (next)
2066 {
2067 gcc_checking_assert (pnxp == next->pnxp);
2068 next->pnxp = &next;
2069 }
2070 }
2071 ~imm_store_chain_info ()
2072 {
2073 *pnxp = next;
2074 if (next)
2075 {
2076 gcc_checking_assert (&next == next->pnxp);
2077 next->pnxp = pnxp;
2078 }
2079 }
2080 bool terminate_and_process_chain ();
2081 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int);
2082 bool coalesce_immediate_stores ();
2083 bool output_merged_store (merged_store_group *);
2084 bool output_merged_stores ();
2085 };
2086
2087 const pass_data pass_data_tree_store_merging = {
2088 GIMPLE_PASS, /* type */
2089 "store-merging", /* name */
2090 OPTGROUP_NONE, /* optinfo_flags */
2091 TV_GIMPLE_STORE_MERGING, /* tv_id */
2092 PROP_ssa, /* properties_required */
2093 0, /* properties_provided */
2094 0, /* properties_destroyed */
2095 0, /* todo_flags_start */
2096 TODO_update_ssa, /* todo_flags_finish */
2097 };
2098
2099 class pass_store_merging : public gimple_opt_pass
2100 {
2101 public:
2102 pass_store_merging (gcc::context *ctxt)
2103 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
2104 {
2105 }
2106
2107 /* Pass not supported for PDP-endian, nor for insane hosts or
2108 target character sizes where native_{encode,interpret}_expr
2109 doesn't work properly. */
2110 virtual bool
2111 gate (function *)
2112 {
2113 return flag_store_merging
2114 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
2115 && CHAR_BIT == 8
2116 && BITS_PER_UNIT == 8;
2117 }
2118
2119 virtual unsigned int execute (function *);
2120
2121 private:
2122 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
2123
2124 /* Form a doubly-linked stack of the elements of m_stores, so that
2125 we can iterate over them in a predictable way. Using this order
2126 avoids extraneous differences in the compiler output just because
2127 of tree pointer variations (e.g. different chains end up in
2128 different positions of m_stores, so they are handled in different
2129 orders, so they allocate or release SSA names in different
2130 orders, and when they get reused, subsequent passes end up
2131 getting different SSA names, which may ultimately change
2132 decisions when going out of SSA). */
2133 imm_store_chain_info *m_stores_head;
2134
2135 bool process_store (gimple *);
2136 bool terminate_and_process_chain (imm_store_chain_info *);
2137 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
2138 bool terminate_and_process_all_chains ();
2139 }; // class pass_store_merging
2140
2141 /* Terminate and process all recorded chains. Return true if any changes
2142 were made. */
2143
2144 bool
2145 pass_store_merging::terminate_and_process_all_chains ()
2146 {
2147 bool ret = false;
2148 while (m_stores_head)
2149 ret |= terminate_and_process_chain (m_stores_head);
2150 gcc_assert (m_stores.is_empty ());
2151 return ret;
2152 }
2153
2154 /* Terminate all chains that are affected by the statement STMT.
2155 CHAIN_INFO is the chain we should ignore from the checks if
2156 non-NULL. Return true if any changes were made. */
2157
2158 bool
2159 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
2160 **chain_info,
2161 gimple *stmt)
2162 {
2163 bool ret = false;
2164
2165 /* If the statement doesn't touch memory it can't alias. */
2166 if (!gimple_vuse (stmt))
2167 return false;
2168
2169 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
2170 ao_ref store_lhs_ref;
2171 ao_ref_init (&store_lhs_ref, store_lhs);
2172 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
2173 {
2174 next = cur->next;
2175
2176 /* We already checked all the stores in chain_info and terminated the
2177 chain if necessary. Skip it here. */
2178 if (chain_info && *chain_info == cur)
2179 continue;
2180
2181 store_immediate_info *info;
2182 unsigned int i;
2183 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
2184 {
2185 tree lhs = gimple_assign_lhs (info->stmt);
2186 ao_ref lhs_ref;
2187 ao_ref_init (&lhs_ref, lhs);
2188 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2189 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2190 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2191 &lhs_ref, false)))
2192 {
2193 if (dump_file && (dump_flags & TDF_DETAILS))
2194 {
2195 fprintf (dump_file, "stmt causes chain termination:\n");
2196 print_gimple_stmt (dump_file, stmt, 0);
2197 }
2198 ret |= terminate_and_process_chain (cur);
2199 break;
2200 }
2201 }
2202 }
2203
2204 return ret;
2205 }
2206
2207 /* Helper function. Terminate the recorded chain storing to base object
2208 BASE. Return true if the merging and output was successful. The m_stores
2209 entry is removed after the processing in any case. */
2210
2211 bool
2212 pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
2213 {
2214 bool ret = chain_info->terminate_and_process_chain ();
2215 m_stores.remove (chain_info->base_addr);
2216 delete chain_info;
2217 return ret;
2218 }
2219
2220 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2221 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2222 be able to sink a load of REF across stores between FIRST and LAST, up
2223 to right before LAST. */
2224
2225 bool
2226 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2227 {
2228 ao_ref r;
2229 ao_ref_init (&r, ref);
2230 unsigned int count = 0;
2231 tree vop = gimple_vdef (last);
2232 gimple *stmt;
2233
2234 /* Return true conservatively if the basic blocks are different. */
2235 if (gimple_bb (first) != gimple_bb (last))
2236 return true;
2237
2238 do
2239 {
2240 stmt = SSA_NAME_DEF_STMT (vop);
2241 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2242 return true;
2243 if (gimple_store_p (stmt)
2244 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2245 return true;
2246 /* Avoid quadratic compile time by bounding the number of checks
2247 we perform. */
2248 if (++count > MAX_STORE_ALIAS_CHECKS)
2249 return true;
2250 vop = gimple_vuse (stmt);
2251 }
2252 while (stmt != first);
2253
2254 return false;
2255 }
2256
2257 /* Return true if INFO->ops[IDX] is mergeable with the
2258 corresponding loads already in MERGED_STORE group.
2259 BASE_ADDR is the base address of the whole store group. */
2260
2261 bool
2262 compatible_load_p (merged_store_group *merged_store,
2263 store_immediate_info *info,
2264 tree base_addr, int idx)
2265 {
2266 store_immediate_info *infof = merged_store->stores[0];
2267 if (!info->ops[idx].base_addr
2268 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2269 info->bitpos - infof->bitpos)
2270 || !operand_equal_p (info->ops[idx].base_addr,
2271 infof->ops[idx].base_addr, 0))
2272 return false;
2273
2274 store_immediate_info *infol = merged_store->stores.last ();
2275 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2276 /* In this case all vuses should be the same, e.g.
2277 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2278 or
2279 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2280 and we can emit the coalesced load next to any of those loads. */
2281 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2282 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2283 return true;
2284
2285 /* Otherwise, at least for now require that the load has the same
2286 vuse as the store. See following examples. */
2287 if (gimple_vuse (info->stmt) != load_vuse)
2288 return false;
2289
2290 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2291 || (infof != infol
2292 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2293 return false;
2294
2295 /* If the load is from the same location as the store, already
2296 the construction of the immediate chain info guarantees no intervening
2297 stores, so no further checks are needed. Example:
2298 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
2299 if (known_eq (info->ops[idx].bitpos, info->bitpos)
2300 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2301 return true;
2302
2303 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2304 of the stores in the group, or any other stores in between those.
2305 Previous calls to compatible_load_p ensured that for all the
2306 merged_store->stores IDX loads, no stmts starting with
2307 merged_store->first_stmt and ending right before merged_store->last_stmt
2308 clobbers those loads. */
2309 gimple *first = merged_store->first_stmt;
2310 gimple *last = merged_store->last_stmt;
2311 unsigned int i;
2312 store_immediate_info *infoc;
2313 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2314 comes before the statement that is currently first, we'll be changing
2315 merged_store->first_stmt. In that case we need to give up if
2316 any of the earlier processed loads could be clobbered by the stmts in
2317 the new range. */
2318 if (info->order < merged_store->first_order)
2319 {
2320 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2321 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2322 return false;
2323 first = info->stmt;
2324 }
2325 /* Similarly, we could change merged_store->last_stmt, so ensure
2326 in that case no stmts in the new range clobber any of the earlier
2327 processed loads. */
2328 else if (info->order > merged_store->last_order)
2329 {
2330 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2331 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2332 return false;
2333 last = info->stmt;
2334 }
2335 /* And finally, we'd be adding a new load to the set, ensure it isn't
2336 clobbered in the new range. */
2337 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2338 return false;
2339
2340 /* Otherwise, we are looking for:
2341 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2342 or
2343 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2344 return true;
2345 }
2346
2347 /* Add all the refs loaded to compute VAL to the REFS vector. */
2348
2349 void
2350 gather_bswap_load_refs (vec<tree> *refs, tree val)
2351 {
2352 if (TREE_CODE (val) != SSA_NAME)
2353 return;
2354
2355 gimple *stmt = SSA_NAME_DEF_STMT (val);
2356 if (!is_gimple_assign (stmt))
2357 return;
2358
2359 if (gimple_assign_load_p (stmt))
2360 {
2361 refs->safe_push (gimple_assign_rhs1 (stmt));
2362 return;
2363 }
2364
2365 switch (gimple_assign_rhs_class (stmt))
2366 {
2367 case GIMPLE_BINARY_RHS:
2368 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2369 /* FALLTHRU */
2370 case GIMPLE_UNARY_RHS:
2371 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2372 break;
2373 default:
2374 gcc_unreachable ();
2375 }
2376 }
2377
2378 /* Check if there are any stores in M_STORE_INFO after index I
2379 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2380 a potential group ending at END and that have their order
2381 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2382 all the stores already merged and the one under consideration
2383 have rhs_code of INTEGER_CST. Return true if there are no such stores.
2384 Consider:
2385 MEM[(long long int *)p_28] = 0;
2386 MEM[(long long int *)p_28 + 8B] = 0;
2387 MEM[(long long int *)p_28 + 16B] = 0;
2388 MEM[(long long int *)p_28 + 24B] = 0;
2389 _129 = (int) _130;
2390 MEM[(int *)p_28 + 8B] = _129;
2391 MEM[(int *)p_28].a = -1;
2392 We already have
2393 MEM[(long long int *)p_28] = 0;
2394 MEM[(int *)p_28].a = -1;
2395 stmts in the current group and need to consider if it is safe to
2396 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2397 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2398 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2399 into the group and merging of those 3 stores is successful, merged
2400 stmts will be emitted at the latest store from that group, i.e.
2401 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2402 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2403 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2404 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2405 into the group. That way it will be its own store group and will
2406 not be touched. If ALL_INTEGER_CST_P and there are overlapping
2407 INTEGER_CST stores, those are mergeable using merge_overlapping,
2408 so don't return false for those. */
2409
2410 static bool
2411 check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
2412 bool all_integer_cst_p, unsigned int last_order,
2413 unsigned HOST_WIDE_INT end)
2414 {
2415 unsigned int len = m_store_info.length ();
2416 for (++i; i < len; ++i)
2417 {
2418 store_immediate_info *info = m_store_info[i];
2419 if (info->bitpos >= end)
2420 break;
2421 if (info->order < last_order
2422 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
2423 return false;
2424 }
2425 return true;
2426 }
2427
2428 /* Return true if m_store_info[first] and at least one following store
2429 form a group which stores a try_size bitsize value that is byte swapped
2430 from a memory load or some value, or is the identity of some value.
2431 This uses the bswap pass APIs. */
2432
2433 bool
2434 imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2435 unsigned int first,
2436 unsigned int try_size)
2437 {
2438 unsigned int len = m_store_info.length (), last = first;
2439 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2440 if (width >= try_size)
2441 return false;
2442 for (unsigned int i = first + 1; i < len; ++i)
2443 {
2444 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
2445 || m_store_info[i]->lp_nr != merged_store->lp_nr
2446 || m_store_info[i]->ins_stmt == NULL)
2447 return false;
2448 width += m_store_info[i]->bitsize;
2449 if (width >= try_size)
2450 {
2451 last = i;
2452 break;
2453 }
2454 }
2455 if (width != try_size)
2456 return false;
2457
2458 bool allow_unaligned
2459 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
2460 /* Punt if the combined store would not be aligned and we need alignment. */
2461 if (!allow_unaligned)
2462 {
2463 unsigned int align = merged_store->align;
2464 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2465 for (unsigned int i = first + 1; i <= last; ++i)
2466 {
2467 unsigned int this_align;
2468 unsigned HOST_WIDE_INT align_bitpos = 0;
2469 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2470 &this_align, &align_bitpos);
2471 if (this_align > align)
2472 {
2473 align = this_align;
2474 align_base = m_store_info[i]->bitpos - align_bitpos;
2475 }
2476 }
2477 unsigned HOST_WIDE_INT align_bitpos
2478 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2479 if (align_bitpos)
2480 align = least_bit_hwi (align_bitpos);
2481 if (align < try_size)
2482 return false;
2483 }
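/* For instance, on a strict-alignment target a group that is known to be
   only 16-bit aligned cannot form a 32-bit or 64-bit byte-swapped store.  */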
2484
2485 tree type;
2486 switch (try_size)
2487 {
2488 case 16: type = uint16_type_node; break;
2489 case 32: type = uint32_type_node; break;
2490 case 64: type = uint64_type_node; break;
2491 default: gcc_unreachable ();
2492 }
2493 struct symbolic_number n;
2494 gimple *ins_stmt = NULL;
2495 int vuse_store = -1;
2496 unsigned int first_order = merged_store->first_order;
2497 unsigned int last_order = merged_store->last_order;
2498 gimple *first_stmt = merged_store->first_stmt;
2499 gimple *last_stmt = merged_store->last_stmt;
2500 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
2501 store_immediate_info *infof = m_store_info[first];
2502
2503 for (unsigned int i = first; i <= last; ++i)
2504 {
2505 store_immediate_info *info = m_store_info[i];
2506 struct symbolic_number this_n = info->n;
2507 this_n.type = type;
2508 if (!this_n.base_addr)
2509 this_n.range = try_size / BITS_PER_UNIT;
2510 else
2511 /* Update vuse in case it has been changed by output_merged_stores. */
2512 this_n.vuse = gimple_vuse (info->ins_stmt);
2513 unsigned int bitpos = info->bitpos - infof->bitpos;
2514 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2515 BYTES_BIG_ENDIAN
2516 ? try_size - info->bitsize - bitpos
2517 : bitpos))
2518 return false;
2519 if (this_n.base_addr && vuse_store)
2520 {
2521 unsigned int j;
2522 for (j = first; j <= last; ++j)
2523 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2524 break;
2525 if (j > last)
2526 {
2527 if (vuse_store == 1)
2528 return false;
2529 vuse_store = 0;
2530 }
2531 }
2532 if (i == first)
2533 {
2534 n = this_n;
2535 ins_stmt = info->ins_stmt;
2536 }
2537 else
2538 {
2539 if (n.base_addr && n.vuse != this_n.vuse)
2540 {
2541 if (vuse_store == 0)
2542 return false;
2543 vuse_store = 1;
2544 }
2545 if (info->order > last_order)
2546 {
2547 last_order = info->order;
2548 last_stmt = info->stmt;
2549 }
2550 else if (info->order < first_order)
2551 {
2552 first_order = info->order;
2553 first_stmt = info->stmt;
2554 }
2555 end = MAX (end, info->bitpos + info->bitsize);
2556
2557 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2558 &this_n, &n);
2559 if (ins_stmt == NULL)
2560 return false;
2561 }
2562 }
2563
2564 uint64_t cmpxchg, cmpnop;
2565 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2566
2567 /* A complete byte swap should make the symbolic number start with
2568 the largest digit in the highest order byte. An unchanged symbolic
2569 number indicates a read with the same endianness as the target. */
2570 if (n.n != cmpnop && n.n != cmpxchg)
2571 return false;
2572
2573 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2574 return false;
2575
2576 if (!check_no_overlap (m_store_info, last, false, last_order, end))
2577 return false;
2578
2579 /* Don't handle memory copy this way if normal non-bswap processing
2580 would handle it too. */
2581 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2582 {
2583 unsigned int i;
2584 for (i = first; i <= last; ++i)
2585 if (m_store_info[i]->rhs_code != MEM_REF)
2586 break;
2587 if (i == last + 1)
2588 return false;
2589 }
2590
2591 if (n.n == cmpxchg)
2592 switch (try_size)
2593 {
2594 case 16:
2595 /* Will emit LROTATE_EXPR. */
2596 break;
2597 case 32:
2598 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2599 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2600 break;
2601 return false;
2602 case 64:
2603 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2604 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2605 break;
2606 return false;
2607 default:
2608 gcc_unreachable ();
2609 }
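/* Note that the 16-bit case needs no bswap builtin or optab check because
   a 16-bit byte swap can be expressed as a rotate (LROTATE_EXPR).  */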
2610
2611 if (!allow_unaligned && n.base_addr)
2612 {
2613 unsigned int align = get_object_alignment (n.src);
2614 if (align < try_size)
2615 return false;
2616 }
2617
2618 /* If each load has the vuse of the corresponding store, we need to
2619 verify that the loads can be sunk right before the last store. */
2620 if (vuse_store == 1)
2621 {
2622 auto_vec<tree, 64> refs;
2623 for (unsigned int i = first; i <= last; ++i)
2624 gather_bswap_load_refs (&refs,
2625 gimple_assign_rhs1 (m_store_info[i]->stmt));
2626
2627 unsigned int i;
2628 tree ref;
2629 FOR_EACH_VEC_ELT (refs, i, ref)
2630 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2631 return false;
2632 n.vuse = NULL_TREE;
2633 }
2634
2635 infof->n = n;
2636 infof->ins_stmt = ins_stmt;
2637 for (unsigned int i = first; i <= last; ++i)
2638 {
2639 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2640 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2641 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2642 if (i != first)
2643 merged_store->merge_into (m_store_info[i]);
2644 }
2645
2646 return true;
2647 }
2648
2649 /* Go through the candidate stores recorded in m_store_info and merge them
2650 into merged_store_group objects recorded into m_merged_store_groups
2651 representing the widened stores. Return true if coalescing was successful
2652 and the number of widened stores is fewer than the original number
2653 of stores. */
2654
2655 bool
2656 imm_store_chain_info::coalesce_immediate_stores ()
2657 {
2658 /* Anything less can't be processed. */
2659 if (m_store_info.length () < 2)
2660 return false;
2661
2662 if (dump_file && (dump_flags & TDF_DETAILS))
2663 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
2664 m_store_info.length ());
2665
2666 store_immediate_info *info;
2667 unsigned int i, ignore = 0;
2668
2669 /* Order the stores by the bitposition they write to. */
2670 m_store_info.qsort (sort_by_bitpos);
2671
2672 info = m_store_info[0];
2673 merged_store_group *merged_store = new merged_store_group (info);
2674 if (dump_file && (dump_flags & TDF_DETAILS))
2675 fputs ("New store group\n", dump_file);
2676
2677 FOR_EACH_VEC_ELT (m_store_info, i, info)
2678 {
2679 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2680
2681 if (i <= ignore)
2682 goto done;
2683
2684 /* First try to handle group of stores like:
2685 p[0] = data >> 24;
2686 p[1] = data >> 16;
2687 p[2] = data >> 8;
2688 p[3] = data;
2689 using the bswap framework. */
2690 if (info->bitpos == merged_store->start + merged_store->width
2691 && merged_store->stores.length () == 1
2692 && merged_store->stores[0]->ins_stmt != NULL
2693 && info->lp_nr == merged_store->lp_nr
2694 && info->ins_stmt != NULL)
2695 {
2696 unsigned int try_size;
2697 for (try_size = 64; try_size >= 16; try_size >>= 1)
2698 if (try_coalesce_bswap (merged_store, i - 1, try_size))
2699 break;
2700
2701 if (try_size >= 16)
2702 {
2703 ignore = i + merged_store->stores.length () - 1;
2704 m_merged_store_groups.safe_push (merged_store);
2705 if (ignore < m_store_info.length ())
2706 merged_store = new merged_store_group (m_store_info[ignore]);
2707 else
2708 merged_store = NULL;
2709 goto done;
2710 }
2711 }
2712
2713 new_bitregion_start
2714 = MIN (merged_store->bitregion_start, info->bitregion_start);
2715 new_bitregion_end
2716 = MAX (merged_store->bitregion_end, info->bitregion_end);
2717
2718 if (info->order >= merged_store->first_nonmergeable_order
2719 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
2720 > (unsigned) param_store_merging_max_size))
2721 ;
2722
2723 /* |---store 1---|
2724 |---store 2---|
2725 Overlapping stores. */
2726 else if (IN_RANGE (info->bitpos, merged_store->start,
2727 merged_store->start + merged_store->width - 1)
2728 /* |---store 1---||---store 2---|
2729 Handle also the consecutive INTEGER_CST stores case here,
2730 as we have here the code to deal with overlaps. */
2731 || (info->bitregion_start <= merged_store->bitregion_end
2732 && info->rhs_code == INTEGER_CST
2733 && merged_store->only_constants
2734 && merged_store->can_be_merged_into (info)))
2735 {
2736 /* Only allow overlapping stores of constants. */
2737 if (info->rhs_code == INTEGER_CST
2738 && merged_store->only_constants
2739 && info->lp_nr == merged_store->lp_nr)
2740 {
2741 unsigned int last_order
2742 = MAX (merged_store->last_order, info->order);
2743 unsigned HOST_WIDE_INT end
2744 = MAX (merged_store->start + merged_store->width,
2745 info->bitpos + info->bitsize);
2746 if (check_no_overlap (m_store_info, i, true, last_order, end))
2747 {
2748 /* check_no_overlap call above made sure there are no
2749 overlapping stores with non-INTEGER_CST rhs_code
2750 in between the first and last of the stores we've
2751 just merged. If there are any INTEGER_CST rhs_code
2752 stores in between, we need to merge_overlapping them
2753 even if in the sort_by_bitpos order there are other
2754 overlapping stores in between. Keep those stores as is.
2755 Example:
2756 MEM[(int *)p_28] = 0;
2757 MEM[(char *)p_28 + 3B] = 1;
2758 MEM[(char *)p_28 + 1B] = 2;
2759 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
2760 We can't merge the zero store with the store of two and
2761 not merge anything else, because the store of one is
2762 in the original order in between those two, but in
2763 sort_by_bitpos order it comes after the last store that
2764 we can't merge with them. We can merge the first 3 stores
2765 and keep the last store as is though. */
2766 unsigned int len = m_store_info.length ();
2767 unsigned int try_order = last_order;
2768 unsigned int first_nonmergeable_order;
2769 unsigned int k;
2770 bool last_iter = false;
2771 int attempts = 0;
2772 do
2773 {
2774 unsigned int max_order = 0;
2775 unsigned first_nonmergeable_int_order = ~0U;
2776 unsigned HOST_WIDE_INT this_end = end;
2777 k = i;
2778 first_nonmergeable_order = ~0U;
2779 for (unsigned int j = i + 1; j < len; ++j)
2780 {
2781 store_immediate_info *info2 = m_store_info[j];
2782 if (info2->bitpos >= this_end)
2783 break;
2784 if (info2->order < try_order)
2785 {
2786 if (info2->rhs_code != INTEGER_CST
2787 || info2->lp_nr != merged_store->lp_nr)
2788 {
2789 /* Normally check_no_overlap makes sure this
2790 doesn't happen, but if end grows below,
2791 then we need to process more stores than
2792 check_no_overlap verified. Example:
2793 MEM[(int *)p_5] = 0;
2794 MEM[(short *)p_5 + 3B] = 1;
2795 MEM[(char *)p_5 + 4B] = _9;
2796 MEM[(char *)p_5 + 2B] = 2; */
2797 k = 0;
2798 break;
2799 }
2800 k = j;
2801 this_end = MAX (this_end,
2802 info2->bitpos + info2->bitsize);
2803 }
2804 else if (info2->rhs_code == INTEGER_CST
2805 && info2->lp_nr == merged_store->lp_nr
2806 && !last_iter)
2807 {
2808 max_order = MAX (max_order, info2->order + 1);
2809 first_nonmergeable_int_order
2810 = MIN (first_nonmergeable_int_order,
2811 info2->order);
2812 }
2813 else
2814 first_nonmergeable_order
2815 = MIN (first_nonmergeable_order, info2->order);
2816 }
2817 if (k == 0)
2818 {
2819 if (last_order == try_order)
2820 break;
2821 /* If this failed, but only because we grew
2822 try_order, retry with the last working one,
2823 so that we merge at least something. */
2824 try_order = last_order;
2825 last_iter = true;
2826 continue;
2827 }
2828 last_order = try_order;
2829 /* Retry with a larger try_order to see if we could
2830 merge some further INTEGER_CST stores. */
2831 if (max_order
2832 && (first_nonmergeable_int_order
2833 < first_nonmergeable_order))
2834 {
2835 try_order = MIN (max_order,
2836 first_nonmergeable_order);
2837 try_order
2838 = MIN (try_order,
2839 merged_store->first_nonmergeable_order);
2840 if (try_order > last_order && ++attempts < 16)
2841 continue;
2842 }
2843 first_nonmergeable_order
2844 = MIN (first_nonmergeable_order,
2845 first_nonmergeable_int_order);
2846 end = this_end;
2847 break;
2848 }
2849 while (1);
2850
2851 if (k != 0)
2852 {
2853 merged_store->merge_overlapping (info);
2854
2855 merged_store->first_nonmergeable_order
2856 = MIN (merged_store->first_nonmergeable_order,
2857 first_nonmergeable_order);
2858
2859 for (unsigned int j = i + 1; j <= k; j++)
2860 {
2861 store_immediate_info *info2 = m_store_info[j];
2862 gcc_assert (info2->bitpos < end);
2863 if (info2->order < last_order)
2864 {
2865 gcc_assert (info2->rhs_code == INTEGER_CST);
2866 if (info != info2)
2867 merged_store->merge_overlapping (info2);
2868 }
2869 /* Other stores are kept and not merged in any
2870 way. */
2871 }
2872 ignore = k;
2873 goto done;
2874 }
2875 }
2876 }
2877 }
2878 /* |---store 1---||---store 2---|
2879 This store is consecutive to the previous one.
2880 Merge it into the current store group. There can be gaps in between
2881 the stores, but there can't be gaps in between bitregions. */
2882 else if (info->bitregion_start <= merged_store->bitregion_end
2883 && merged_store->can_be_merged_into (info))
2884 {
2885 store_immediate_info *infof = merged_store->stores[0];
2886
2887 /* All the rhs_code ops that take 2 operands are commutative;
2888 swap the operands if that could make the operands compatible. */
2889 if (infof->ops[0].base_addr
2890 && infof->ops[1].base_addr
2891 && info->ops[0].base_addr
2892 && info->ops[1].base_addr
2893 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
2894 info->bitpos - infof->bitpos)
2895 && operand_equal_p (info->ops[1].base_addr,
2896 infof->ops[0].base_addr, 0))
2897 {
2898 std::swap (info->ops[0], info->ops[1]);
2899 info->ops_swapped_p = true;
2900 }
2901 if (check_no_overlap (m_store_info, i, false,
2902 MAX (merged_store->last_order, info->order),
2903 MAX (merged_store->start + merged_store->width,
2904 info->bitpos + info->bitsize)))
2905 {
2906 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
2907 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
2908 {
2909 info->rhs_code = BIT_INSERT_EXPR;
2910 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
2911 info->ops[0].base_addr = NULL_TREE;
2912 }
2913 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
2914 {
2915 store_immediate_info *infoj;
2916 unsigned int j;
2917 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
2918 {
2919 infoj->rhs_code = BIT_INSERT_EXPR;
2920 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
2921 infoj->ops[0].base_addr = NULL_TREE;
2922 }
2923 }
2924 if ((infof->ops[0].base_addr
2925 ? compatible_load_p (merged_store, info, base_addr, 0)
2926 : !info->ops[0].base_addr)
2927 && (infof->ops[1].base_addr
2928 ? compatible_load_p (merged_store, info, base_addr, 1)
2929 : !info->ops[1].base_addr))
2930 {
2931 merged_store->merge_into (info);
2932 goto done;
2933 }
2934 }
2935 }
2936
2937 /* |---store 1---| <gap> |---store 2---|.
2938 Gap between stores or the rhs not compatible. Start a new group. */
2939
2940 /* Try to apply all the stores recorded for the group to determine
2941 the bitpattern they write and discard it if that fails.
2942 This will also reject single-store groups. */
2943 if (merged_store->apply_stores ())
2944 m_merged_store_groups.safe_push (merged_store);
2945 else
2946 delete merged_store;
2947
2948 merged_store = new merged_store_group (info);
2949 if (dump_file && (dump_flags & TDF_DETAILS))
2950 fputs ("New store group\n", dump_file);
2951
2952 done:
2953 if (dump_file && (dump_flags & TDF_DETAILS))
2954 {
2955 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
2956 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
2957 i, info->bitsize, info->bitpos);
2958 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
2959 fputc ('\n', dump_file);
2960 }
2961 }
2962
2963 /* Record or discard the last store group. */
2964 if (merged_store)
2965 {
2966 if (merged_store->apply_stores ())
2967 m_merged_store_groups.safe_push (merged_store);
2968 else
2969 delete merged_store;
2970 }
2971
2972 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
2973
2974 bool success
2975 = !m_merged_store_groups.is_empty ()
2976 && m_merged_store_groups.length () < m_store_info.length ();
2977
2978 if (success && dump_file)
2979 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
2980 m_merged_store_groups.length ());
2981
2982 return success;
2983 }
2984
2985 /* Return the type to use for the merged stores or loads described by STMTS.
2986 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
2987 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
2988 of the MEM_REFs if any. */
2989
2990 static tree
2991 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
2992 unsigned short *cliquep, unsigned short *basep)
2993 {
2994 gimple *stmt;
2995 unsigned int i;
2996 tree type = NULL_TREE;
2997 tree ret = NULL_TREE;
2998 *cliquep = 0;
2999 *basep = 0;
3000
3001 FOR_EACH_VEC_ELT (stmts, i, stmt)
3002 {
3003 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3004 : gimple_assign_lhs (stmt);
3005 tree type1 = reference_alias_ptr_type (ref);
3006 tree base = get_base_address (ref);
3007
3008 if (i == 0)
3009 {
3010 if (TREE_CODE (base) == MEM_REF)
3011 {
3012 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3013 *basep = MR_DEPENDENCE_BASE (base);
3014 }
3015 ret = type = type1;
3016 continue;
3017 }
3018 if (!alias_ptr_types_compatible_p (type, type1))
3019 ret = ptr_type_node;
3020 if (TREE_CODE (base) != MEM_REF
3021 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3022 || *basep != MR_DEPENDENCE_BASE (base))
3023 {
3024 *cliquep = 0;
3025 *basep = 0;
3026 }
3027 }
3028 return ret;
3029 }
3030
3031 /* Return the first location_t information we can find among the statements
3032 in STMTS. */
3033
3034 static location_t
3035 get_location_for_stmts (vec<gimple *> &stmts)
3036 {
3037 gimple *stmt;
3038 unsigned int i;
3039
3040 FOR_EACH_VEC_ELT (stmts, i, stmt)
3041 if (gimple_has_location (stmt))
3042 return gimple_location (stmt);
3043
3044 return UNKNOWN_LOCATION;
3045 }
3046
3047 /* Used to describe a store resulting from splitting a wide store into smaller
3048 regularly-sized stores in split_group. */
3049
3050 class split_store
3051 {
3052 public:
3053 unsigned HOST_WIDE_INT bytepos;
3054 unsigned HOST_WIDE_INT size;
3055 unsigned HOST_WIDE_INT align;
3056 auto_vec<store_immediate_info *> orig_stores;
3057 /* True if there is a single orig stmt covering the whole split store. */
3058 bool orig;
3059 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3060 unsigned HOST_WIDE_INT);
3061 };
3062
3063 /* Simple constructor. */
3064
3065 split_store::split_store (unsigned HOST_WIDE_INT bp,
3066 unsigned HOST_WIDE_INT sz,
3067 unsigned HOST_WIDE_INT al)
3068 : bytepos (bp), size (sz), align (al), orig (false)
3069 {
3070 orig_stores.create (0);
3071 }
3072
3073 /* Record all stores in GROUP that write to the region starting at BITPOS
3074 and of size BITSIZE. Record infos for such statements in STORES if
3075 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
3076 if there is exactly one original store in the range (in that case ignore
3077 clobber stmts, unless there are only clobber stmts). */
3078
3079 static store_immediate_info *
3080 find_constituent_stores (class merged_store_group *group,
3081 vec<store_immediate_info *> *stores,
3082 unsigned int *first,
3083 unsigned HOST_WIDE_INT bitpos,
3084 unsigned HOST_WIDE_INT bitsize)
3085 {
3086 store_immediate_info *info, *ret = NULL;
3087 unsigned int i;
3088 bool second = false;
3089 bool update_first = true;
3090 unsigned HOST_WIDE_INT end = bitpos + bitsize;
3091 for (i = *first; group->stores.iterate (i, &info); ++i)
3092 {
3093 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3094 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
3095 if (stmt_end <= bitpos)
3096 {
3097 /* BITPOS passed to this function never decreases from within the
3098 same split_group call, so optimize and don't scan info records
3099 which are known to end before or at BITPOS next time.
3100 Only do it if all stores before this one also pass this. */
3101 if (update_first)
3102 *first = i + 1;
3103 continue;
3104 }
3105 else
3106 update_first = false;
3107
3108 /* The stores in GROUP are ordered by bitposition so if we're past
3109 the region for this group return early. */
3110 if (stmt_start >= end)
3111 return ret;
3112
3113 if (gimple_clobber_p (info->stmt))
3114 {
3115 if (stores)
3116 stores->safe_push (info);
3117 if (ret == NULL)
3118 ret = info;
3119 continue;
3120 }
3121 if (stores)
3122 {
3123 stores->safe_push (info);
3124 if (ret && !gimple_clobber_p (ret->stmt))
3125 {
3126 ret = NULL;
3127 second = true;
3128 }
3129 }
3130 else if (ret && !gimple_clobber_p (ret->stmt))
3131 return NULL;
3132 if (!second)
3133 ret = info;
3134 }
3135 return ret;
3136 }
3137
3138 /* Return how many SSA_NAMEs used to compute the value to store in the INFO
3139 store have multiple uses. If any SSA_NAME has multiple uses, also
3140 count statements needed to compute it. */
3141
3142 static unsigned
3143 count_multiple_uses (store_immediate_info *info)
3144 {
3145 gimple *stmt = info->stmt;
3146 unsigned ret = 0;
3147 switch (info->rhs_code)
3148 {
3149 case INTEGER_CST:
3150 return 0;
3151 case BIT_AND_EXPR:
3152 case BIT_IOR_EXPR:
3153 case BIT_XOR_EXPR:
3154 if (info->bit_not_p)
3155 {
3156 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3157 ret = 1; /* Fall through below to return
3158 the BIT_NOT_EXPR stmt and then
3159 BIT_{AND,IOR,XOR}_EXPR and anything it
3160 uses. */
3161 else
3162 /* After this, stmt is the BIT_NOT_EXPR. */
3163 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3164 }
3165 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3166 {
3167 ret += 1 + info->ops[0].bit_not_p;
3168 if (info->ops[1].base_addr)
3169 ret += 1 + info->ops[1].bit_not_p;
3170 return ret + 1;
3171 }
3172 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3173 /* stmt is now the BIT_*_EXPR. */
3174 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3175 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3176 else if (info->ops[info->ops_swapped_p].bit_not_p)
3177 {
3178 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3179 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3180 ++ret;
3181 }
3182 if (info->ops[1].base_addr == NULL_TREE)
3183 {
3184 gcc_checking_assert (!info->ops_swapped_p);
3185 return ret;
3186 }
3187 if (!has_single_use (gimple_assign_rhs2 (stmt)))
3188 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3189 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
3190 {
3191 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3192 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3193 ++ret;
3194 }
3195 return ret;
3196 case MEM_REF:
3197 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3198 return 1 + info->ops[0].bit_not_p;
3199 else if (info->ops[0].bit_not_p)
3200 {
3201 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3202 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3203 return 1;
3204 }
3205 return 0;
3206 case BIT_INSERT_EXPR:
3207 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
3208 default:
3209 gcc_unreachable ();
3210 }
3211 }
3212
3213 /* Split a merged store described by GROUP by populating the SPLIT_STORES
3214 vector (if non-NULL) with split_store structs describing the byte offset
3215 (from the base), the bit size and alignment of each store as well as the
3216 original statements involved in each such split group.
3217 This is to separate the splitting strategy from the statement
3218 building/emission/linking done in output_merged_store.
3219 Return number of new stores.
3220 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3221 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3222 BZERO_FIRST may be true only when the first store covers the whole group
3223 and clears it; if BZERO_FIRST is true, keep that first store in the set
3224 unmodified and emit further stores for the overrides only.
3225 If SPLIT_STORES is NULL, it is just a dry run to count number of
3226 new stores. */
3227
3228 static unsigned int
3229 split_group (merged_store_group *group, bool allow_unaligned_store,
3230 bool allow_unaligned_load, bool bzero_first,
3231 vec<split_store *> *split_stores,
3232 unsigned *total_orig,
3233 unsigned *total_new)
3234 {
3235 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3236 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
3237 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
3238 unsigned HOST_WIDE_INT group_align = group->align;
3239 unsigned HOST_WIDE_INT align_base = group->align_base;
3240 unsigned HOST_WIDE_INT group_load_align = group_align;
3241 bool any_orig = false;
3242
3243 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3244
3245 if (group->stores[0]->rhs_code == LROTATE_EXPR
3246 || group->stores[0]->rhs_code == NOP_EXPR)
3247 {
3248 gcc_assert (!bzero_first);
3249 /* For bswap framework using sets of stores, all the checking
3250 has been done earlier in try_coalesce_bswap and needs to be
3251 emitted as a single store. */
3252 if (total_orig)
3253 {
3254 /* Avoid the old/new stmt count heuristics. It should be
3255 always beneficial. */
3256 total_new[0] = 1;
3257 total_orig[0] = 2;
3258 }
3259
3260 if (split_stores)
3261 {
3262 unsigned HOST_WIDE_INT align_bitpos
3263 = (group->start - align_base) & (group_align - 1);
3264 unsigned HOST_WIDE_INT align = group_align;
3265 if (align_bitpos)
3266 align = least_bit_hwi (align_bitpos);
3267 bytepos = group->start / BITS_PER_UNIT;
3268 split_store *store
3269 = new split_store (bytepos, group->width, align);
3270 unsigned int first = 0;
3271 find_constituent_stores (group, &store->orig_stores,
3272 &first, group->start, group->width);
3273 split_stores->safe_push (store);
3274 }
3275
3276 return 1;
3277 }
3278
3279 unsigned int ret = 0, first = 0;
3280 unsigned HOST_WIDE_INT try_pos = bytepos;
3281
3282 if (total_orig)
3283 {
3284 unsigned int i;
3285 store_immediate_info *info = group->stores[0];
3286
3287 total_new[0] = 0;
3288 total_orig[0] = 1; /* The orig store. */
3289 info = group->stores[0];
3290 if (info->ops[0].base_addr)
3291 total_orig[0]++;
3292 if (info->ops[1].base_addr)
3293 total_orig[0]++;
3294 switch (info->rhs_code)
3295 {
3296 case BIT_AND_EXPR:
3297 case BIT_IOR_EXPR:
3298 case BIT_XOR_EXPR:
3299 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3300 break;
3301 default:
3302 break;
3303 }
3304 total_orig[0] *= group->stores.length ();
3305
3306 FOR_EACH_VEC_ELT (group->stores, i, info)
3307 {
3308 total_new[0] += count_multiple_uses (info);
3309 total_orig[0] += (info->bit_not_p
3310 + info->ops[0].bit_not_p
3311 + info->ops[1].bit_not_p);
3312 }
3313 }
3314
3315 if (!allow_unaligned_load)
3316 for (int i = 0; i < 2; ++i)
3317 if (group->load_align[i])
3318 group_load_align = MIN (group_load_align, group->load_align[i]);
3319
3320 if (bzero_first)
3321 {
3322 store_immediate_info *gstore;
3323 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3324 if (!gimple_clobber_p (gstore->stmt))
3325 break;
3326 ++first;
3327 ret = 1;
3328 if (split_stores)
3329 {
3330 split_store *store
3331 = new split_store (bytepos, gstore->bitsize, align_base);
3332 store->orig_stores.safe_push (gstore);
3333 store->orig = true;
3334 any_orig = true;
3335 split_stores->safe_push (store);
3336 }
3337 }
3338
3339 while (size > 0)
3340 {
3341 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3342 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3343 || (bzero_first && group->val[try_pos - bytepos] == 0)))
3344 {
3345 /* Skip padding bytes. */
3346 ++try_pos;
3347 size -= BITS_PER_UNIT;
3348 continue;
3349 }
3350
3351 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
3352 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3353 unsigned HOST_WIDE_INT align_bitpos
3354 = (try_bitpos - align_base) & (group_align - 1);
3355 unsigned HOST_WIDE_INT align = group_align;
3356 bool found_orig = false;
3357 if (align_bitpos)
3358 align = least_bit_hwi (align_bitpos);
3359 if (!allow_unaligned_store)
3360 try_size = MIN (try_size, align);
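/* For example, when unaligned stores are not allowed, a position that is
   only 16-bit aligned caps try_size at 16 bits.  */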
3361 if (!allow_unaligned_load)
3362 {
3363 /* If we can't do or don't want to do unaligned stores
3364 as well as loads, we need to take the loads into account
3365 as well. */
3366 unsigned HOST_WIDE_INT load_align = group_load_align;
3367 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3368 if (align_bitpos)
3369 load_align = least_bit_hwi (align_bitpos);
3370 for (int i = 0; i < 2; ++i)
3371 if (group->load_align[i])
3372 {
3373 align_bitpos
3374 = known_alignment (try_bitpos
3375 - group->stores[0]->bitpos
3376 + group->stores[0]->ops[i].bitpos
3377 - group->load_align_base[i]);
3378 if (align_bitpos & (group_load_align - 1))
3379 {
3380 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3381 load_align = MIN (load_align, a);
3382 }
3383 }
3384 try_size = MIN (try_size, load_align);
3385 }
3386 store_immediate_info *info
3387 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
3388 if (info && !gimple_clobber_p (info->stmt))
3389 {
3390 /* If there is just one original statement for the range, see if
3391 we can just reuse the original store which could be even larger
3392 than try_size. */
3393 unsigned HOST_WIDE_INT stmt_end
3394 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
3395 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3396 stmt_end - try_bitpos);
3397 if (info && info->bitpos >= try_bitpos)
3398 {
3399 store_immediate_info *info2 = NULL;
3400 unsigned int first_copy = first;
3401 if (info->bitpos > try_bitpos
3402 && stmt_end - try_bitpos <= try_size)
3403 {
3404 info2 = find_constituent_stores (group, NULL, &first_copy,
3405 try_bitpos,
3406 info->bitpos - try_bitpos);
3407 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3408 }
3409 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3410 {
3411 info2 = find_constituent_stores (group, NULL, &first_copy,
3412 stmt_end,
3413 (try_bitpos + try_size)
3414 - stmt_end);
3415 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3416 }
3417 if (info2 == NULL)
3418 {
3419 try_size = stmt_end - try_bitpos;
3420 found_orig = true;
3421 goto found;
3422 }
3423 }
3424 }
3425
3426 /* Approximate store bitsize for the case when there are no padding
3427 bits. */
3428 while (try_size > size)
3429 try_size /= 2;
3430 /* Now look for whole padding bytes at the end of that bitsize. */
3431 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3432 if (group->mask[try_pos - bytepos + nonmasked - 1]
3433 != (unsigned char) ~0U
3434 && (!bzero_first
3435 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
3436 break;
3437 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
3438 {
3439 /* If entire try_size range is padding, skip it. */
3440 try_pos += try_size / BITS_PER_UNIT;
3441 size -= try_size;
3442 continue;
3443 }
3444 /* Otherwise try to decrease try_size if second half, last 3 quarters
3445 etc. are padding. */
3446 nonmasked *= BITS_PER_UNIT;
3447 while (nonmasked <= try_size / 2)
3448 try_size /= 2;
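/* For example, if only the first byte of a 32-bit try_size is not padding
   (nonmasked == 8 after the multiplication above), try_size shrinks to
   8 bits here.  */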
3449 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
3450 {
3451 /* Now look for whole padding bytes at the start of that bitsize. */
3452 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3453 for (masked = 0; masked < try_bytesize; ++masked)
3454 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3455 && (!bzero_first
3456 || group->val[try_pos - bytepos + masked] != 0))
3457 break;
3458 masked *= BITS_PER_UNIT;
3459 gcc_assert (masked < try_size);
3460 if (masked >= try_size / 2)
3461 {
3462 while (masked >= try_size / 2)
3463 {
3464 try_size /= 2;
3465 try_pos += try_size / BITS_PER_UNIT;
3466 size -= try_size;
3467 masked -= try_size;
3468 }
3469 /* Need to recompute the alignment, so just retry at the new
3470 position. */
3471 continue;
3472 }
3473 }
3474
3475 found:
3476 ++ret;
3477
3478 if (split_stores)
3479 {
3480 split_store *store
3481 = new split_store (try_pos, try_size, align);
3482 info = find_constituent_stores (group, &store->orig_stores,
3483 &first, try_bitpos, try_size);
3484 if (info
3485 && !gimple_clobber_p (info->stmt)
3486 && info->bitpos >= try_bitpos
3487 && info->bitpos + info->bitsize <= try_bitpos + try_size
3488 && (store->orig_stores.length () == 1
3489 || found_orig
3490 || (info->bitpos == try_bitpos
3491 && (info->bitpos + info->bitsize
3492 == try_bitpos + try_size))))
3493 {
3494 store->orig = true;
3495 any_orig = true;
3496 }
3497 split_stores->safe_push (store);
3498 }
3499
3500 try_pos += try_size / BITS_PER_UNIT;
3501 size -= try_size;
3502 }
3503
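/* The accounting below is a rough estimate of the statements the new
   sequence will need: one per split store, one more per load operand,
   one for a BIT_AND/IOR/XOR combination and one per BIT_NOT/BIT_XOR
   inversion, with an adjustment for original stores that are reused
   verbatim.  E.g. (hypothetical) two split stores that each load one
   operand and xor it with a constant count as 2 stores + 2 loads
   + 2 xors.  */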
3504 if (total_orig)
3505 {
3506 unsigned int i;
3507 split_store *store;
3508 /* If we are reusing some original stores and any of the
3509 original SSA_NAMEs had multiple uses, we need to subtract
3510 those now before we add the new ones. */
3511 if (total_new[0] && any_orig)
3512 {
3513 FOR_EACH_VEC_ELT (*split_stores, i, store)
3514 if (store->orig)
3515 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3516 }
3517 total_new[0] += ret; /* The new store. */
3518 store_immediate_info *info = group->stores[0];
3519 if (info->ops[0].base_addr)
3520 total_new[0] += ret;
3521 if (info->ops[1].base_addr)
3522 total_new[0] += ret;
3523 switch (info->rhs_code)
3524 {
3525 case BIT_AND_EXPR:
3526 case BIT_IOR_EXPR:
3527 case BIT_XOR_EXPR:
3528 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3529 break;
3530 default:
3531 break;
3532 }
3533 FOR_EACH_VEC_ELT (*split_stores, i, store)
3534 {
3535 unsigned int j;
3536 bool bit_not_p[3] = { false, false, false };
3537 /* If all orig_stores have certain bit_not_p set, then
3538 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3539 If some orig_stores have certain bit_not_p set, then
3540 we'd use a BIT_XOR_EXPR with a mask and need to account for
3541 it. */
3542 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3543 {
3544 if (info->ops[0].bit_not_p)
3545 bit_not_p[0] = true;
3546 if (info->ops[1].bit_not_p)
3547 bit_not_p[1] = true;
3548 if (info->bit_not_p)
3549 bit_not_p[2] = true;
3550 }
3551 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3552 }
3553
3554 }
3555
3556 return ret;
3557 }
3558
3559 /* Return the operation through which the operand IDX (if < 2) or
3560 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3561 is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
3562 the bits should be xored with mask. */
3563
3564 static enum tree_code
3565 invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3566 {
3567 unsigned int i;
3568 store_immediate_info *info;
3569 unsigned int cnt = 0;
3570 bool any_paddings = false;
3571 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3572 {
3573 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3574 if (bit_not_p)
3575 {
3576 ++cnt;
3577 tree lhs = gimple_assign_lhs (info->stmt);
3578 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3579 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3580 any_paddings = true;
3581 }
3582 }
3583 mask = NULL_TREE;
3584 if (cnt == 0)
3585 return NOP_EXPR;
3586 if (cnt == split_store->orig_stores.length () && !any_paddings)
3587 return BIT_NOT_EXPR;
3588
3589 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3590 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3591 unsigned char *buf
3592 = XALLOCAVEC (unsigned char, buf_size);
3593 memset (buf, ~0U, buf_size);
3594 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3595 {
3596 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3597 if (!bit_not_p)
3598 continue;
3599 /* Clear regions with bit_not_p and invert afterwards, rather than
3600 clear regions with !bit_not_p, so that gaps in between stores aren't
3601 set in the mask. */
3602 unsigned HOST_WIDE_INT bitsize = info->bitsize;
3603 unsigned HOST_WIDE_INT prec = bitsize;
3604 unsigned int pos_in_buffer = 0;
3605 if (any_paddings)
3606 {
3607 tree lhs = gimple_assign_lhs (info->stmt);
3608 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3609 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3610 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3611 }
3612 if (info->bitpos < try_bitpos)
3613 {
3614 gcc_assert (info->bitpos + bitsize > try_bitpos);
3615 if (!BYTES_BIG_ENDIAN)
3616 {
3617 if (prec <= try_bitpos - info->bitpos)
3618 continue;
3619 prec -= try_bitpos - info->bitpos;
3620 }
3621 bitsize -= try_bitpos - info->bitpos;
3622 if (BYTES_BIG_ENDIAN && prec > bitsize)
3623 prec = bitsize;
3624 }
3625 else
3626 pos_in_buffer = info->bitpos - try_bitpos;
3627 if (prec < bitsize)
3628 {
3629 /* If this is a bool inversion, invert just the least significant
3630 prec bits rather than all bits of it. */
3631 if (BYTES_BIG_ENDIAN)
3632 {
3633 pos_in_buffer += bitsize - prec;
3634 if (pos_in_buffer >= split_store->size)
3635 continue;
3636 }
3637 bitsize = prec;
3638 }
3639 if (pos_in_buffer + bitsize > split_store->size)
3640 bitsize = split_store->size - pos_in_buffer;
3641 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3642 if (BYTES_BIG_ENDIAN)
3643 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3644 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3645 else
3646 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3647 }
3648 for (unsigned int i = 0; i < buf_size; ++i)
3649 buf[i] = ~buf[i];
3650 mask = native_interpret_expr (int_type, buf, buf_size);
3651 return BIT_XOR_EXPR;
3652 }
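
/* For illustration (hypothetical layout): if a 4-byte split store is
   made up of four 1-byte constituent stores and only the middle two of
   them had their value inverted (bit_not_p set), cnt is neither 0 nor
   the number of stores, so invert_op builds the byte buffer
   { 0x00, 0xff, 0xff, 0x00 }, sets MASK to the 0x00ffff00 constant and
   returns BIT_XOR_EXPR; the caller then emits
     _2 = _1 ^ 0x00ffff00;
   instead of a full BIT_NOT_EXPR.  */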
3653
3654 /* Given a merged store group GROUP output the widened version of it.
3655 The store chain is against the base object BASE.
3656 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3657 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3658 Make sure that the number of statements output is less than the number of
3659 original statements. If a better sequence is possible emit it and
3660 return true. */
3661
3662 bool
3663 imm_store_chain_info::output_merged_store (merged_store_group *group)
3664 {
3665 split_store *split_store;
3666 unsigned int i;
3667 unsigned HOST_WIDE_INT start_byte_pos
3668 = group->bitregion_start / BITS_PER_UNIT;
3669
3670 unsigned int orig_num_stmts = group->stores.length ();
3671 if (orig_num_stmts < 2)
3672 return false;
3673
3674 auto_vec<class split_store *, 32> split_stores;
3675 bool allow_unaligned_store
3676 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
3677 bool allow_unaligned_load = allow_unaligned_store;
3678 bool bzero_first = false;
3679 store_immediate_info *store;
3680 unsigned int num_clobber_stmts = 0;
3681 if (group->stores[0]->rhs_code == INTEGER_CST)
3682 {
3683 FOR_EACH_VEC_ELT (group->stores, i, store)
3684 if (gimple_clobber_p (store->stmt))
3685 num_clobber_stmts++;
3686 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
3687 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
3688 && group->start == store->bitpos
3689 && group->width == store->bitsize
3690 && (group->start % BITS_PER_UNIT) == 0
3691 && (group->width % BITS_PER_UNIT) == 0)
3692 {
3693 bzero_first = true;
3694 break;
3695 }
3696 else
3697 break;
3698 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
3699 if (gimple_clobber_p (store->stmt))
3700 num_clobber_stmts++;
3701 if (num_clobber_stmts == orig_num_stmts)
3702 return false;
3703 orig_num_stmts -= num_clobber_stmts;
3704 }
3705 if (allow_unaligned_store || bzero_first)
3706 {
3707 /* If unaligned stores are allowed, see how many stores we'd emit
3708 for unaligned and how many stores we'd emit for aligned stores.
3709 Only use unaligned stores if it allows fewer stores than aligned.
3710 Similarly, if the whole region is cleared first, prefer expanding
3711 it together with the merged stores rather than emitting the clear
3712 first followed by further merged stores. */
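/* The two flags are encoded in the low bits of PASS: bit 0 requests
   unaligned stores, bit 1 requests the bzero_first handling, giving
   pass 0 = aligned, pass 1 = unaligned, pass 2 = aligned + bzero_first,
   pass 3 = unaligned + bzero_first; the variant that yields the fewest
   split stores wins.  */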
3713 unsigned cnt[4] = { ~0, ~0, ~0, ~0 };
3714 int pass_min = 0;
3715 for (int pass = 0; pass < 4; ++pass)
3716 {
3717 if (!allow_unaligned_store && (pass & 1) != 0)
3718 continue;
3719 if (!bzero_first && (pass & 2) != 0)
3720 continue;
3721 cnt[pass] = split_group (group, (pass & 1) != 0,
3722 allow_unaligned_load, (pass & 2) != 0,
3723 NULL, NULL, NULL);
3724 if (cnt[pass] < cnt[pass_min])
3725 pass_min = pass;
3726 }
3727 if ((pass_min & 1) == 0)
3728 allow_unaligned_store = false;
3729 if ((pass_min & 2) == 0)
3730 bzero_first = false;
3731 }
3732 unsigned total_orig, total_new;
3733 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
3734 &split_stores, &total_orig, &total_new);
3735
3736 /* Determine if there is a clobber covering the whole group at the start,
3737 followed by proposed split stores that cover the whole group. In that
3738 case, prefer the transformation even if
3739 split_stores.length () == orig_num_stmts. */
3740 bool clobber_first = false;
3741 if (num_clobber_stmts
3742 && gimple_clobber_p (group->stores[0]->stmt)
3743 && group->start == group->stores[0]->bitpos
3744 && group->width == group->stores[0]->bitsize
3745 && (group->start % BITS_PER_UNIT) == 0
3746 && (group->width % BITS_PER_UNIT) == 0)
3747 {
3748 clobber_first = true;
3749 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
3750 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3751 if (split_store->bytepos != pos)
3752 {
3753 clobber_first = false;
3754 break;
3755 }
3756 else
3757 pos += split_store->size / BITS_PER_UNIT;
3758 if (pos != (group->start + group->width) / BITS_PER_UNIT)
3759 clobber_first = false;
3760 }
3761
3762 if (split_stores.length () >= orig_num_stmts + clobber_first)
3763 {
3764
3765 /* We didn't manage to reduce the number of statements. Bail out. */
3766 if (dump_file && (dump_flags & TDF_DETAILS))
3767 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3768 " Not profitable to emit new sequence.\n",
3769 orig_num_stmts);
3770 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3771 delete split_store;
3772 return false;
3773 }
3774 if (total_orig <= total_new)
3775 {
3776 /* If number of estimated new statements is above estimated original
3777 statements, bail out too. */
3778 if (dump_file && (dump_flags & TDF_DETAILS))
3779 fprintf (dump_file, "Estimated number of original stmts (%u)"
3780 " not larger than estimated number of new"
3781 " stmts (%u).\n",
3782 total_orig, total_new);
3783 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3784 delete split_store;
3785 return false;
3786 }
3787 if (group->stores[0]->rhs_code == INTEGER_CST)
3788 {
3789 bool all_orig = true;
3790 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3791 if (!split_store->orig)
3792 {
3793 all_orig = false;
3794 break;
3795 }
3796 if (all_orig)
3797 {
3798 unsigned int cnt = split_stores.length ();
3799 store_immediate_info *store;
3800 FOR_EACH_VEC_ELT (group->stores, i, store)
3801 if (gimple_clobber_p (store->stmt))
3802 ++cnt;
3803 /* Punt if we wouldn't make any real changes, i.e. keep all
3804 orig stmts + all clobbers. */
3805 if (cnt == group->stores.length ())
3806 {
3807 if (dump_file && (dump_flags & TDF_DETAILS))
3808 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3809 " Not profitable to emit new sequence.\n",
3810 orig_num_stmts);
3811 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3812 delete split_store;
3813 return false;
3814 }
3815 }
3816 }
3817
3818 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
3819 gimple_seq seq = NULL;
3820 tree last_vdef, new_vuse;
3821 last_vdef = gimple_vdef (group->last_stmt);
3822 new_vuse = gimple_vuse (group->last_stmt);
3823 tree bswap_res = NULL_TREE;
3824
3825 /* Clobbers are not removed. */
3826 if (gimple_clobber_p (group->last_stmt))
3827 {
3828 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
3829 gimple_set_vdef (group->last_stmt, new_vuse);
3830 }
3831
3832 if (group->stores[0]->rhs_code == LROTATE_EXPR
3833 || group->stores[0]->rhs_code == NOP_EXPR)
3834 {
3835 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
3836 gimple *ins_stmt = group->stores[0]->ins_stmt;
3837 struct symbolic_number *n = &group->stores[0]->n;
3838 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
3839
3840 switch (n->range)
3841 {
3842 case 16:
3843 load_type = bswap_type = uint16_type_node;
3844 break;
3845 case 32:
3846 load_type = uint32_type_node;
3847 if (bswap)
3848 {
3849 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
3850 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3851 }
3852 break;
3853 case 64:
3854 load_type = uint64_type_node;
3855 if (bswap)
3856 {
3857 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
3858 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3859 }
3860 break;
3861 default:
3862 gcc_unreachable ();
3863 }
3864
3865 /* If the loads each have the vuse of the corresponding store,
3866 we've checked the aliasing already in try_coalesce_bswap and
3867 we want to sink the needed load into seq, so we need to use
3868 new_vuse on the load. */
3869 if (n->base_addr)
3870 {
3871 if (n->vuse == NULL)
3872 {
3873 n->vuse = new_vuse;
3874 ins_stmt = NULL;
3875 }
3876 else
3877 /* Update vuse in case it has been changed by output_merged_stores. */
3878 n->vuse = gimple_vuse (ins_stmt);
3879 }
3880 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
3881 bswap_type, load_type, n, bswap);
3882 gcc_assert (bswap_res);
3883 }
3884
3885 gimple *stmt = NULL;
3886 auto_vec<gimple *, 32> orig_stmts;
3887 gimple_seq this_seq;
3888 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
3889 is_gimple_mem_ref_addr, NULL_TREE);
3890 gimple_seq_add_seq_without_update (&seq, this_seq);
3891
3892 tree load_addr[2] = { NULL_TREE, NULL_TREE };
3893 gimple_seq load_seq[2] = { NULL, NULL };
3894 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
3895 for (int j = 0; j < 2; ++j)
3896 {
3897 store_operand_info &op = group->stores[0]->ops[j];
3898 if (op.base_addr == NULL_TREE)
3899 continue;
3900
3901 store_immediate_info *infol = group->stores.last ();
3902 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
3903 {
3904 /* We can't pick the location randomly; while we've verified
3905 all the loads have the same vuse, they can still be in different
3906 basic blocks and we need to pick the one from the last bb:
3907 int x = q[0];
3908 if (x == N) return;
3909 int y = q[1];
3910 p[0] = x;
3911 p[1] = y;
3912 otherwise if we put the wider load at the q[0] load, we might
3913 segfault if q[1] is not mapped. */
3914 basic_block bb = gimple_bb (op.stmt);
3915 gimple *ostmt = op.stmt;
3916 store_immediate_info *info;
3917 FOR_EACH_VEC_ELT (group->stores, i, info)
3918 {
3919 gimple *tstmt = info->ops[j].stmt;
3920 basic_block tbb = gimple_bb (tstmt);
3921 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
3922 {
3923 ostmt = tstmt;
3924 bb = tbb;
3925 }
3926 }
3927 load_gsi[j] = gsi_for_stmt (ostmt);
3928 load_addr[j]
3929 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3930 &load_seq[j], is_gimple_mem_ref_addr,
3931 NULL_TREE);
3932 }
3933 else if (operand_equal_p (base_addr, op.base_addr, 0))
3934 load_addr[j] = addr;
3935 else
3936 {
3937 load_addr[j]
3938 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3939 &this_seq, is_gimple_mem_ref_addr,
3940 NULL_TREE);
3941 gimple_seq_add_seq_without_update (&seq, this_seq);
3942 }
3943 }
3944
3945 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3946 {
3947 unsigned HOST_WIDE_INT try_size = split_store->size;
3948 unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
3949 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
3950 unsigned HOST_WIDE_INT align = split_store->align;
3951 tree dest, src;
3952 location_t loc;
3953 if (split_store->orig)
3954 {
3955 /* If there is just a single non-clobber constituent store
3956 which covers the whole area, just reuse the lhs and rhs. */
3957 gimple *orig_stmt = NULL;
3958 store_immediate_info *store;
3959 unsigned int j;
3960 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
3961 if (!gimple_clobber_p (store->stmt))
3962 {
3963 orig_stmt = store->stmt;
3964 break;
3965 }
3966 dest = gimple_assign_lhs (orig_stmt);
3967 src = gimple_assign_rhs1 (orig_stmt);
3968 loc = gimple_location (orig_stmt);
3969 }
3970 else
3971 {
3972 store_immediate_info *info;
3973 unsigned short clique, base;
3974 unsigned int k;
3975 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3976 orig_stmts.safe_push (info->stmt);
3977 tree offset_type
3978 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
3979 loc = get_location_for_stmts (orig_stmts);
3980 orig_stmts.truncate (0);
3981
3982 tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
3983 int_type = build_aligned_type (int_type, align);
3984 dest = fold_build2 (MEM_REF, int_type, addr,
3985 build_int_cst (offset_type, try_pos));
3986 if (TREE_CODE (dest) == MEM_REF)
3987 {
3988 MR_DEPENDENCE_CLIQUE (dest) = clique;
3989 MR_DEPENDENCE_BASE (dest) = base;
3990 }
3991
3992 tree mask;
3993 if (bswap_res)
3994 mask = integer_zero_node;
3995 else
3996 mask = native_interpret_expr (int_type,
3997 group->mask + try_pos
3998 - start_byte_pos,
3999 group->buf_size);
4000
4001 tree ops[2];
4002 for (int j = 0;
4003 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4004 ++j)
4005 {
4006 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4007 if (bswap_res)
4008 ops[j] = bswap_res;
4009 else if (op.base_addr)
4010 {
4011 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4012 orig_stmts.safe_push (info->ops[j].stmt);
4013
4014 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4015 &clique, &base);
4016 location_t load_loc = get_location_for_stmts (orig_stmts);
4017 orig_stmts.truncate (0);
4018
4019 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4020 unsigned HOST_WIDE_INT align_bitpos
4021 = known_alignment (try_bitpos
4022 - split_store->orig_stores[0]->bitpos
4023 + op.bitpos);
4024 if (align_bitpos & (load_align - 1))
4025 load_align = least_bit_hwi (align_bitpos);
4026
4027 tree load_int_type
4028 = build_nonstandard_integer_type (try_size, UNSIGNED);
4029 load_int_type
4030 = build_aligned_type (load_int_type, load_align);
4031
4032 poly_uint64 load_pos
4033 = exact_div (try_bitpos
4034 - split_store->orig_stores[0]->bitpos
4035 + op.bitpos,
4036 BITS_PER_UNIT);
4037 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4038 build_int_cst (offset_type, load_pos));
4039 if (TREE_CODE (ops[j]) == MEM_REF)
4040 {
4041 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4042 MR_DEPENDENCE_BASE (ops[j]) = base;
4043 }
4044 if (!integer_zerop (mask))
4045 /* The load might load some bits (that will be masked off
4046 later on) uninitialized, avoid -W*uninitialized
4047 warnings in that case. */
4048 TREE_NO_WARNING (ops[j]) = 1;
4049
4050 stmt = gimple_build_assign (make_ssa_name (int_type),
4051 ops[j]);
4052 gimple_set_location (stmt, load_loc);
4053 if (gsi_bb (load_gsi[j]))
4054 {
4055 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4056 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4057 }
4058 else
4059 {
4060 gimple_set_vuse (stmt, new_vuse);
4061 gimple_seq_add_stmt_without_update (&seq, stmt);
4062 }
4063 ops[j] = gimple_assign_lhs (stmt);
4064 tree xor_mask;
4065 enum tree_code inv_op
4066 = invert_op (split_store, j, int_type, xor_mask);
4067 if (inv_op != NOP_EXPR)
4068 {
4069 stmt = gimple_build_assign (make_ssa_name (int_type),
4070 inv_op, ops[j], xor_mask);
4071 gimple_set_location (stmt, load_loc);
4072 ops[j] = gimple_assign_lhs (stmt);
4073
4074 if (gsi_bb (load_gsi[j]))
4075 gimple_seq_add_stmt_without_update (&load_seq[j],
4076 stmt);
4077 else
4078 gimple_seq_add_stmt_without_update (&seq, stmt);
4079 }
4080 }
4081 else
4082 ops[j] = native_interpret_expr (int_type,
4083 group->val + try_pos
4084 - start_byte_pos,
4085 group->buf_size);
4086 }
4087
4088 switch (split_store->orig_stores[0]->rhs_code)
4089 {
4090 case BIT_AND_EXPR:
4091 case BIT_IOR_EXPR:
4092 case BIT_XOR_EXPR:
4093 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4094 {
4095 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4096 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4097 }
4098 location_t bit_loc;
4099 bit_loc = get_location_for_stmts (orig_stmts);
4100 orig_stmts.truncate (0);
4101
4102 stmt
4103 = gimple_build_assign (make_ssa_name (int_type),
4104 split_store->orig_stores[0]->rhs_code,
4105 ops[0], ops[1]);
4106 gimple_set_location (stmt, bit_loc);
4107 /* If there is just one load and there is a separate
4108 load_seq[0], emit the bitwise op right after it. */
4109 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4110 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4111 /* Otherwise, if at least one load is in seq, we need to
4112 emit the bitwise op right before the store. If there
4113 are two loads and they are emitted somewhere else, it would
4114 be better to emit the bitwise op as early as possible;
4115 we don't track where that would be possible right now
4116 though. */
4117 else
4118 gimple_seq_add_stmt_without_update (&seq, stmt);
4119 src = gimple_assign_lhs (stmt);
4120 tree xor_mask;
4121 enum tree_code inv_op;
4122 inv_op = invert_op (split_store, 2, int_type, xor_mask);
4123 if (inv_op != NOP_EXPR)
4124 {
4125 stmt = gimple_build_assign (make_ssa_name (int_type),
4126 inv_op, src, xor_mask);
4127 gimple_set_location (stmt, bit_loc);
4128 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4129 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4130 else
4131 gimple_seq_add_stmt_without_update (&seq, stmt);
4132 src = gimple_assign_lhs (stmt);
4133 }
4134 break;
4135 case LROTATE_EXPR:
4136 case NOP_EXPR:
4137 src = ops[0];
4138 if (!is_gimple_val (src))
4139 {
4140 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4141 src);
4142 gimple_seq_add_stmt_without_update (&seq, stmt);
4143 src = gimple_assign_lhs (stmt);
4144 }
4145 if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
4146 {
4147 stmt = gimple_build_assign (make_ssa_name (int_type),
4148 NOP_EXPR, src);
4149 gimple_seq_add_stmt_without_update (&seq, stmt);
4150 src = gimple_assign_lhs (stmt);
4151 }
4152 inv_op = invert_op (split_store, 2, int_type, xor_mask);
4153 if (inv_op != NOP_EXPR)
4154 {
4155 stmt = gimple_build_assign (make_ssa_name (int_type),
4156 inv_op, src, xor_mask);
4157 gimple_set_location (stmt, loc);
4158 gimple_seq_add_stmt_without_update (&seq, stmt);
4159 src = gimple_assign_lhs (stmt);
4160 }
4161 break;
4162 default:
4163 src = ops[0];
4164 break;
4165 }
4166
4167 /* If bit insertion is required, we use the source as an accumulator
4168 into which the successive bit-field values are manually inserted.
4169 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
4170 if (group->bit_insertion)
4171 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4172 if (info->rhs_code == BIT_INSERT_EXPR
4173 && info->bitpos < try_bitpos + try_size
4174 && info->bitpos + info->bitsize > try_bitpos)
4175 {
4176 /* Mask, truncate, convert to final type, shift and ior into
4177 the accumulator. Note that every step can be a no-op. */
4178 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4179 const HOST_WIDE_INT end_gap
4180 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4181 tree tem = info->ops[0].val;
4182 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4183 {
4184 const unsigned HOST_WIDE_INT size
4185 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4186 tree integer_type
4187 = build_nonstandard_integer_type (size, UNSIGNED);
4188 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4189 integer_type, tem);
4190 }
4191 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4192 {
4193 tree bitfield_type
4194 = build_nonstandard_integer_type (info->bitsize,
4195 UNSIGNED);
4196 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4197 }
4198 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
4199 {
4200 const unsigned HOST_WIDE_INT imask
4201 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4202 tem = gimple_build (&seq, loc,
4203 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4204 build_int_cst (TREE_TYPE (tem),
4205 imask));
4206 }
4207 const HOST_WIDE_INT shift
4208 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4209 if (shift < 0)
4210 tem = gimple_build (&seq, loc,
4211 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4212 build_int_cst (NULL_TREE, -shift));
4213 tem = gimple_convert (&seq, loc, int_type, tem);
4214 if (shift > 0)
4215 tem = gimple_build (&seq, loc,
4216 LSHIFT_EXPR, int_type, tem,
4217 build_int_cst (NULL_TREE, shift));
4218 src = gimple_build (&seq, loc,
4219 BIT_IOR_EXPR, int_type, tem, src);
4220 }
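/* A rough worked example of the steps above (hypothetical values,
   little-endian): inserting a 3-bit field at start_gap == 4 into a
   16-bit accumulator amounts to
     tem = val & 0x7;         mask to the field width
     tem = (uint16_t) tem;    convert to the accumulator type
     tem = tem << 4;          shift into place
     src = src | tem;         ior into the accumulator
   with each of the four steps skipped when it is a no-op.  */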
4221
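/* With a non-zero MASK the store becomes a read-modify-write sequence
   of roughly this shape (hypothetical 32-bit example, MASK having bits
   set exactly for the padding bits):
     _1 = [p];
     _2 = _1 & MASK;
     _3 = src & ~MASK;   (folded when src is a constant)
     src = _2 | _3;
   so bits not written by any constituent store keep their previous
   value.  */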
4222 if (!integer_zerop (mask))
4223 {
4224 tree tem = make_ssa_name (int_type);
4225 tree load_src = unshare_expr (dest);
4226 /* The load might load some or all bits uninitialized,
4227 avoid -W*uninitialized warnings in that case.
4228 As an optimization, if all the bits were provably
4229 uninitialized (no stores at all yet, or the previous
4230 store being a CLOBBER), we could optimize away the load
4231 and replace it e.g. with 0. */
4232 TREE_NO_WARNING (load_src) = 1;
4233 stmt = gimple_build_assign (tem, load_src);
4234 gimple_set_location (stmt, loc);
4235 gimple_set_vuse (stmt, new_vuse);
4236 gimple_seq_add_stmt_without_update (&seq, stmt);
4237
4238 /* FIXME: If there is a single chunk of zero bits in mask,
4239 perhaps use BIT_INSERT_EXPR instead? */
4240 stmt = gimple_build_assign (make_ssa_name (int_type),
4241 BIT_AND_EXPR, tem, mask);
4242 gimple_set_location (stmt, loc);
4243 gimple_seq_add_stmt_without_update (&seq, stmt);
4244 tem = gimple_assign_lhs (stmt);
4245
4246 if (TREE_CODE (src) == INTEGER_CST)
4247 src = wide_int_to_tree (int_type,
4248 wi::bit_and_not (wi::to_wide (src),
4249 wi::to_wide (mask)));
4250 else
4251 {
4252 tree nmask
4253 = wide_int_to_tree (int_type,
4254 wi::bit_not (wi::to_wide (mask)));
4255 stmt = gimple_build_assign (make_ssa_name (int_type),
4256 BIT_AND_EXPR, src, nmask);
4257 gimple_set_location (stmt, loc);
4258 gimple_seq_add_stmt_without_update (&seq, stmt);
4259 src = gimple_assign_lhs (stmt);
4260 }
4261 stmt = gimple_build_assign (make_ssa_name (int_type),
4262 BIT_IOR_EXPR, tem, src);
4263 gimple_set_location (stmt, loc);
4264 gimple_seq_add_stmt_without_update (&seq, stmt);
4265 src = gimple_assign_lhs (stmt);
4266 }
4267 }
4268
4269 stmt = gimple_build_assign (dest, src);
4270 gimple_set_location (stmt, loc);
4271 gimple_set_vuse (stmt, new_vuse);
4272 gimple_seq_add_stmt_without_update (&seq, stmt);
4273
4274 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4275 add_stmt_to_eh_lp (stmt, group->lp_nr);
4276
4277 tree new_vdef;
4278 if (i < split_stores.length () - 1)
4279 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
4280 else
4281 new_vdef = last_vdef;
4282
4283 gimple_set_vdef (stmt, new_vdef);
4284 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4285 new_vuse = new_vdef;
4286 }
4287
4288 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4289 delete split_store;
4290
4291 gcc_assert (seq);
4292 if (dump_file)
4293 {
4294 fprintf (dump_file,
4295 "New sequence of %u stores to replace old one of %u stores\n",
4296 split_stores.length (), orig_num_stmts);
4297 if (dump_flags & TDF_DETAILS)
4298 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4299 }
4300
4301 if (gimple_clobber_p (group->last_stmt))
4302 update_stmt (group->last_stmt);
4303
4304 if (group->lp_nr > 0)
4305 {
4306 /* We're going to insert a sequence of (potentially) throwing stores
4307 into an active EH region. This means that we're going to create
4308 new basic blocks with EH edges pointing to the post landing pad
4309 and therefore we will have to update its PHI nodes, if any. For the
4310 virtual PHI node, we're going to use the VDEFs created above, but
4311 for the other nodes, we need to record the original reaching defs. */
4312 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4313 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4314 basic_block last_bb = gimple_bb (group->last_stmt);
4315 edge last_edge = find_edge (last_bb, lp_bb);
4316 auto_vec<tree, 16> last_defs;
4317 gphi_iterator gpi;
4318 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4319 {
4320 gphi *phi = gpi.phi ();
4321 tree last_def;
4322 if (virtual_operand_p (gimple_phi_result (phi)))
4323 last_def = NULL_TREE;
4324 else
4325 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4326 last_defs.safe_push (last_def);
4327 }
4328
4329 /* Do the insertion. Then, if new basic blocks have been created in the
4330 process, rewind the chain of VDEFs created above to walk the new basic
4331 blocks and update the corresponding arguments of the PHI nodes. */
4332 update_modified_stmts (seq);
4333 if (gimple_find_sub_bbs (seq, &last_gsi))
4334 while (last_vdef != gimple_vuse (group->last_stmt))
4335 {
4336 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4337 if (stmt_could_throw_p (cfun, stmt))
4338 {
4339 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4340 unsigned int i;
4341 for (gpi = gsi_start_phis (lp_bb), i = 0;
4342 !gsi_end_p (gpi);
4343 gsi_next (&gpi), i++)
4344 {
4345 gphi *phi = gpi.phi ();
4346 tree new_def;
4347 if (virtual_operand_p (gimple_phi_result (phi)))
4348 new_def = last_vdef;
4349 else
4350 new_def = last_defs[i];
4351 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4352 }
4353 }
4354 last_vdef = gimple_vuse (stmt);
4355 }
4356 }
4357 else
4358 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4359
4360 for (int j = 0; j < 2; ++j)
4361 if (load_seq[j])
4362 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
4363
4364 return true;
4365 }
4366
4367 /* Process the merged_store_group objects created in the coalescing phase.
4368 The stores are all against the base object BASE.
4369 Try to output the widened stores and delete the original statements if
4370 successful. Return true iff any changes were made. */
4371
4372 bool
4373 imm_store_chain_info::output_merged_stores ()
4374 {
4375 unsigned int i;
4376 merged_store_group *merged_store;
4377 bool ret = false;
4378 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4379 {
4380 if (dbg_cnt (store_merging)
4381 && output_merged_store (merged_store))
4382 {
4383 unsigned int j;
4384 store_immediate_info *store;
4385 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4386 {
4387 gimple *stmt = store->stmt;
4388 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4389 /* Don't remove clobbers, they are still useful even if
4390 everything is overwritten afterwards. */
4391 if (gimple_clobber_p (stmt))
4392 continue;
4393 gsi_remove (&gsi, true);
4394 if (store->lp_nr)
4395 remove_stmt_from_eh_lp (stmt);
4396 if (stmt != merged_store->last_stmt)
4397 {
4398 unlink_stmt_vdef (stmt);
4399 release_defs (stmt);
4400 }
4401 }
4402 ret = true;
4403 }
4404 }
4405 if (ret && dump_file)
4406 fprintf (dump_file, "Merging successful!\n");
4407
4408 return ret;
4409 }
4410
4411 /* Coalesce the store_immediate_info objects recorded against the base object
4412 BASE in the first phase and output them.
4413 Delete the allocated structures.
4414 Return true if any changes were made. */
4415
4416 bool
4417 imm_store_chain_info::terminate_and_process_chain ()
4418 {
4419 /* Process store chain. */
4420 bool ret = false;
4421 if (m_store_info.length () > 1)
4422 {
4423 ret = coalesce_immediate_stores ();
4424 if (ret)
4425 ret = output_merged_stores ();
4426 }
4427
4428 /* Delete all the entries we allocated ourselves. */
4429 store_immediate_info *info;
4430 unsigned int i;
4431 FOR_EACH_VEC_ELT (m_store_info, i, info)
4432 delete info;
4433
4434 merged_store_group *merged_info;
4435 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4436 delete merged_info;
4437
4438 return ret;
4439 }
4440
4441 /* Return true iff LHS is a destination potentially interesting for
4442 store merging. In practice these are the codes that get_inner_reference
4443 can process. */
4444
4445 static bool
4446 lhs_valid_for_store_merging_p (tree lhs)
4447 {
4448 if (DECL_P (lhs))
4449 return true;
4450
4451 switch (TREE_CODE (lhs))
4452 {
4453 case ARRAY_REF:
4454 case ARRAY_RANGE_REF:
4455 case BIT_FIELD_REF:
4456 case COMPONENT_REF:
4457 case MEM_REF:
4458 return true;
4459 default:
4460 return false;
4461 }
4462
4463 gcc_unreachable ();
4464 }
4465
4466 /* Return true if the tree RHS is a constant we want to consider
4467 during store merging. In practice accept all codes that
4468 native_encode_expr accepts. */
4469
4470 static bool
4471 rhs_valid_for_store_merging_p (tree rhs)
4472 {
4473 unsigned HOST_WIDE_INT size;
4474 if (TREE_CODE (rhs) == CONSTRUCTOR
4475 && CONSTRUCTOR_NELTS (rhs) == 0
4476 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4477 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4478 return true;
4479 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4480 && native_encode_expr (rhs, NULL, size) != 0);
4481 }
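
/* E.g. integer, real and vector constants that native_encode_expr can
   serialize are acceptable, and so is the empty CONSTRUCTOR used by
   "= {}" zeroing of an aggregate whose size is a known constant.  */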
4482
4483 /* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4484 and return true on success or false on failure. */
4485
4486 static bool
4487 adjust_bit_pos (poly_offset_int byte_off,
4488 poly_int64 *pbitpos,
4489 poly_uint64 *pbitregion_start,
4490 poly_uint64 *pbitregion_end)
4491 {
4492 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4493 bit_off += *pbitpos;
4494
4495 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4496 {
4497 if (maybe_ne (*pbitregion_end, 0U))
4498 {
4499 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4500 bit_off += *pbitregion_start;
4501 if (bit_off.to_uhwi (pbitregion_start))
4502 {
4503 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4504 bit_off += *pbitregion_end;
4505 if (!bit_off.to_uhwi (pbitregion_end))
4506 *pbitregion_end = 0;
4507 }
4508 else
4509 *pbitregion_end = 0;
4510 }
4511 return true;
4512 }
4513 else
4514 return false;
4515 }
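
/* For example (hypothetical values): a byte offset of 4 with *PBITPOS
   == 8 yields *PBITPOS == 40, and a recorded bit region is shifted by
   the same 32 bits; the function fails only when the adjusted position
   is negative or does not fit in a signed HOST_WIDE_INT.  */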
4516
4517 /* If MEM is a memory reference usable for store merging (either as
4518 store destination or for loads), return the non-NULL base_addr
4519 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4520 Otherwise return NULL; *PBITSIZE is still valid even in that
4521 case. */
4522
4523 static tree
4524 mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4525 poly_uint64 *pbitpos,
4526 poly_uint64 *pbitregion_start,
4527 poly_uint64 *pbitregion_end)
4528 {
4529 poly_int64 bitsize, bitpos;
4530 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4531 machine_mode mode;
4532 int unsignedp = 0, reversep = 0, volatilep = 0;
4533 tree offset;
4534 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4535 &unsignedp, &reversep, &volatilep);
4536 *pbitsize = bitsize;
4537 if (known_eq (bitsize, 0))
4538 return NULL_TREE;
4539
4540 if (TREE_CODE (mem) == COMPONENT_REF
4541 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4542 {
4543 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
4544 if (maybe_ne (bitregion_end, 0U))
4545 bitregion_end += 1;
4546 }
4547
4548 if (reversep)
4549 return NULL_TREE;
4550
4551 /* We do not want to rewrite TARGET_MEM_REFs. */
4552 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4553 return NULL_TREE;
4554 /* In some cases get_inner_reference may return a
4555 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4556 canonicalize the base_addr to MEM_REF [ptr] and take
4557 byteoffset into account in the bitpos. This occurs in
4558 PR 23684 and this way we can catch more chains. */
4559 else if (TREE_CODE (base_addr) == MEM_REF)
4560 {
4561 if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
4562 &bitregion_start, &bitregion_end))
4563 return NULL_TREE;
4564 base_addr = TREE_OPERAND (base_addr, 0);
4565 }
4566 /* get_inner_reference returns the base object, get at its
4567 address now. */
4568 else
4569 {
4570 if (maybe_lt (bitpos, 0))
4571 return NULL_TREE;
4572 base_addr = build_fold_addr_expr (base_addr);
4573 }
4574
4575 if (offset)
4576 {
4577 /* If the access is variable offset then a base decl has to be
4578 address-taken to be able to emit pointer-based stores to it.
4579 ??? We might be able to get away with re-using the original
4580 base up to the first variable part and then wrapping that inside
4581 a BIT_FIELD_REF. */
4582 tree base = get_base_address (base_addr);
4583 if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
4584 return NULL_TREE;
4585
4586 /* Similarly to above for the base, remove constant from the offset. */
4587 if (TREE_CODE (offset) == PLUS_EXPR
4588 && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
4589 && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
4590 &bitpos, &bitregion_start, &bitregion_end))
4591 offset = TREE_OPERAND (offset, 0);
4592
4593 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4594 base_addr, offset);
4595 }
4596
4597 if (known_eq (bitregion_end, 0U))
4598 {
4599 bitregion_start = round_down_to_byte_boundary (bitpos);
4600 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
4601 }
4602
4603 *pbitsize = bitsize;
4604 *pbitpos = bitpos;
4605 *pbitregion_start = bitregion_start;
4606 *pbitregion_end = bitregion_end;
4607 return base_addr;
4608 }
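
/* As an example (hypothetical declarations): for a store to s.b[4],
   where b is an array of 4-byte integers inside struct s, this returns
   &s, sets *PBITSIZE to 32, *PBITPOS to the bit offset of b plus 128,
   and, as no bit-field is involved, rounds the bit region out to the
   surrounding byte boundaries of the access.  */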
4609
4610 /* Return true if STMT is a load that can be used for store merging.
4611 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
4612 BITREGION_END are properties of the corresponding store. */
4613
4614 static bool
4615 handled_load (gimple *stmt, store_operand_info *op,
4616 poly_uint64 bitsize, poly_uint64 bitpos,
4617 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
4618 {
4619 if (!is_gimple_assign (stmt))
4620 return false;
4621 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4622 {
4623 tree rhs1 = gimple_assign_rhs1 (stmt);
4624 if (TREE_CODE (rhs1) == SSA_NAME
4625 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4626 bitregion_start, bitregion_end))
4627 {
4628 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4629 been optimized earlier, but if allowed here, would confuse the
4630 multiple uses counting. */
4631 if (op->bit_not_p)
4632 return false;
4633 op->bit_not_p = !op->bit_not_p;
4634 return true;
4635 }
4636 return false;
4637 }
4638 if (gimple_vuse (stmt)
4639 && gimple_assign_load_p (stmt)
4640 && !stmt_can_throw_internal (cfun, stmt)
4641 && !gimple_has_volatile_ops (stmt))
4642 {
4643 tree mem = gimple_assign_rhs1 (stmt);
4644 op->base_addr
4645 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4646 &op->bitregion_start,
4647 &op->bitregion_end);
4648 if (op->base_addr != NULL_TREE
4649 && known_eq (op->bitsize, bitsize)
4650 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4651 && known_ge (op->bitpos - op->bitregion_start,
4652 bitpos - bitregion_start)
4653 && known_ge (op->bitregion_end - op->bitpos,
4654 bitregion_end - bitpos))
4655 {
4656 op->stmt = stmt;
4657 op->val = mem;
4658 op->bit_not_p = false;
4659 return true;
4660 }
4661 }
4662 return false;
4663 }
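
/* A minimal example of the shape handled_load accepts (hypothetical
   GIMPLE, a byte access matching the size and bit region of the
   store):
     _1 = [q + 1B];
     _2 = ~_1;
     [p + 1B] = _2;
   the optional single BIT_NOT_EXPR sets OP->bit_not_p and the load
   itself fills in OP->base_addr, OP->bitpos and the bit region via
   mem_valid_for_store_merging.  */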
4664
4665 /* Return the index number of the landing pad for STMT, if any. */
4666
4667 static int
4668 lp_nr_for_store (gimple *stmt)
4669 {
4670 if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
4671 return 0;
4672
4673 if (!stmt_could_throw_p (cfun, stmt))
4674 return 0;
4675
4676 return lookup_stmt_eh_lp (stmt);
4677 }
4678
4679 /* Record the store STMT for store merging optimization if it can be
4680 optimized. Return true if any changes were made. */
4681
4682 bool
4683 pass_store_merging::process_store (gimple *stmt)
4684 {
4685 tree lhs = gimple_assign_lhs (stmt);
4686 tree rhs = gimple_assign_rhs1 (stmt);
4687 poly_uint64 bitsize, bitpos = 0;
4688 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4689 tree base_addr
4690 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4691 &bitregion_start, &bitregion_end);
4692 if (known_eq (bitsize, 0U))
4693 return false;
4694
4695 bool invalid = (base_addr == NULL_TREE
4696 || (maybe_gt (bitsize,
4697 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
4698 && TREE_CODE (rhs) != INTEGER_CST
4699 && (TREE_CODE (rhs) != CONSTRUCTOR
4700 || CONSTRUCTOR_NELTS (rhs) != 0)));
4701 enum tree_code rhs_code = ERROR_MARK;
4702 bool bit_not_p = false;
4703 struct symbolic_number n;
4704 gimple *ins_stmt = NULL;
4705 store_operand_info ops[2];
4706 if (invalid)
4707 ;
4708 else if (rhs_valid_for_store_merging_p (rhs))
4709 {
4710 rhs_code = INTEGER_CST;
4711 ops[0].val = rhs;
4712 }
4713 else if (TREE_CODE (rhs) != SSA_NAME)
4714 invalid = true;
4715 else
4716 {
4717 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
4718 if (!is_gimple_assign (def_stmt))
4719 invalid = true;
4720 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
4721 bitregion_start, bitregion_end))
4722 rhs_code = MEM_REF;
4723 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
4724 {
4725 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4726 if (TREE_CODE (rhs1) == SSA_NAME
4727 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
4728 {
4729 bit_not_p = true;
4730 def_stmt = SSA_NAME_DEF_STMT (rhs1);
4731 }
4732 }
4733
4734 if (rhs_code == ERROR_MARK && !invalid)
4735 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
4736 {
4737 case BIT_AND_EXPR:
4738 case BIT_IOR_EXPR:
4739 case BIT_XOR_EXPR:
4740 tree rhs1, rhs2;
4741 rhs1 = gimple_assign_rhs1 (def_stmt);
4742 rhs2 = gimple_assign_rhs2 (def_stmt);
4743 invalid = true;
4744 if (TREE_CODE (rhs1) != SSA_NAME)
4745 break;
4746 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
4747 if (!is_gimple_assign (def_stmt1)
4748 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
4749 bitregion_start, bitregion_end))
4750 break;
4751 if (rhs_valid_for_store_merging_p (rhs2))
4752 ops[1].val = rhs2;
4753 else if (TREE_CODE (rhs2) != SSA_NAME)
4754 break;
4755 else
4756 {
4757 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
4758 if (!is_gimple_assign (def_stmt2))
4759 break;
4760 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
4761 bitregion_start, bitregion_end))
4762 break;
4763 }
4764 invalid = false;
4765 break;
4766 default:
4767 invalid = true;
4768 break;
4769 }
4770
4771 unsigned HOST_WIDE_INT const_bitsize;
4772 if (bitsize.is_constant (&const_bitsize)
4773 && (const_bitsize % BITS_PER_UNIT) == 0
4774 && const_bitsize <= 64
4775 && multiple_p (bitpos, BITS_PER_UNIT))
4776 {
4777 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
4778 if (ins_stmt)
4779 {
4780 uint64_t nn = n.n;
4781 for (unsigned HOST_WIDE_INT i = 0;
4782 i < const_bitsize;
4783 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
4784 if ((nn & MARKER_MASK) == 0
4785 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
4786 {
4787 ins_stmt = NULL;
4788 break;
4789 }
4790 if (ins_stmt)
4791 {
4792 if (invalid)
4793 {
4794 rhs_code = LROTATE_EXPR;
4795 ops[0].base_addr = NULL_TREE;
4796 ops[1].base_addr = NULL_TREE;
4797 }
4798 invalid = false;
4799 }
4800 }
4801 }
4802
4803 if (invalid
4804 && bitsize.is_constant (&const_bitsize)
4805 && ((const_bitsize % BITS_PER_UNIT) != 0
4806 || !multiple_p (bitpos, BITS_PER_UNIT))
4807 && const_bitsize <= MAX_FIXED_MODE_SIZE)
4808 {
4809 /* Bypass a conversion to the bit-field type. */
4810 if (!bit_not_p
4811 && is_gimple_assign (def_stmt)
4812 && CONVERT_EXPR_CODE_P (rhs_code))
4813 {
4814 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4815 if (TREE_CODE (rhs1) == SSA_NAME
4816 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
4817 rhs = rhs1;
4818 }
4819 rhs_code = BIT_INSERT_EXPR;
4820 bit_not_p = false;
4821 ops[0].val = rhs;
4822 ops[0].base_addr = NULL_TREE;
4823 ops[1].base_addr = NULL_TREE;
4824 invalid = false;
4825 }
4826 }
4827
4828 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
4829 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
4830 if (invalid
4831 || !bitsize.is_constant (&const_bitsize)
4832 || !bitpos.is_constant (&const_bitpos)
4833 || !bitregion_start.is_constant (&const_bitregion_start)
4834 || !bitregion_end.is_constant (&const_bitregion_end))
4835 return terminate_all_aliasing_chains (NULL, stmt);
4836
4837 if (!ins_stmt)
4838 memset (&n, 0, sizeof (n));
4839
4840 class imm_store_chain_info **chain_info = NULL;
4841 bool ret = false;
4842 if (base_addr)
4843 chain_info = m_stores.get (base_addr);
4844
4845 store_immediate_info *info;
4846 if (chain_info)
4847 {
4848 unsigned int ord = (*chain_info)->m_store_info.length ();
4849 info = new store_immediate_info (const_bitsize, const_bitpos,
4850 const_bitregion_start,
4851 const_bitregion_end,
4852 stmt, ord, rhs_code, n, ins_stmt,
4853 bit_not_p, lp_nr_for_store (stmt),
4854 ops[0], ops[1]);
4855 if (dump_file && (dump_flags & TDF_DETAILS))
4856 {
4857 fprintf (dump_file, "Recording immediate store from stmt:\n");
4858 print_gimple_stmt (dump_file, stmt, 0);
4859 }
4860 (*chain_info)->m_store_info.safe_push (info);
4861 ret |= terminate_all_aliasing_chains (chain_info, stmt);
4862 /* If we reach the limit of stores to merge in a chain, terminate and
4863 process the chain now. */
4864 if ((*chain_info)->m_store_info.length ()
4865 == (unsigned int) param_max_stores_to_merge)
4866 {
4867 if (dump_file && (dump_flags & TDF_DETAILS))
4868 fprintf (dump_file,
4869 "Reached maximum number of statements to merge:\n");
4870 ret |= terminate_and_process_chain (*chain_info);
4871 }
4872 return ret;
4873 }
4874
4875 /* Store aliases any existing chain? */
4876 ret |= terminate_all_aliasing_chains (NULL, stmt);
4877 /* Start a new chain. */
4878 class imm_store_chain_info *new_chain
4879 = new imm_store_chain_info (m_stores_head, base_addr);
4880 info = new store_immediate_info (const_bitsize, const_bitpos,
4881 const_bitregion_start,
4882 const_bitregion_end,
4883 stmt, 0, rhs_code, n, ins_stmt,
4884 bit_not_p, lp_nr_for_store (stmt),
4885 ops[0], ops[1]);
4886 new_chain->m_store_info.safe_push (info);
4887 m_stores.put (base_addr, new_chain);
4888 if (dump_file && (dump_flags & TDF_DETAILS))
4889 {
4890 fprintf (dump_file, "Starting new chain with statement:\n");
4891 print_gimple_stmt (dump_file, stmt, 0);
4892 fprintf (dump_file, "The base object is:\n");
4893 print_generic_expr (dump_file, base_addr);
4894 fprintf (dump_file, "\n");
4895 }
4896 return ret;
4897 }
4898
4899 /* Return true if STMT is a store valid for store merging. */
4900
4901 static bool
4902 store_valid_for_store_merging_p (gimple *stmt)
4903 {
4904 return gimple_assign_single_p (stmt)
4905 && gimple_vdef (stmt)
4906 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
4907 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
4908 }
4909
4910 enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
4911
4912 /* Return the status of basic block BB wrt store merging. */
4913
4914 static enum basic_block_status
4915 get_status_for_store_merging (basic_block bb)
4916 {
4917 unsigned int num_statements = 0;
4918 gimple_stmt_iterator gsi;
4919 edge e;
4920
4921 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4922 {
4923 gimple *stmt = gsi_stmt (gsi);
4924
4925 if (is_gimple_debug (stmt))
4926 continue;
4927
4928 if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
4929 break;
4930 }
4931
4932 if (num_statements == 0)
4933 return BB_INVALID;
4934
4935 if (cfun->can_throw_non_call_exceptions && cfun->eh
4936 && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
4937 && (e = find_fallthru_edge (bb->succs))
4938 && e->dest == bb->next_bb)
4939 return BB_EXTENDED_VALID;
4940
4941 return num_statements >= 2 ? BB_VALID : BB_INVALID;
4942 }
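
/* For instance (hypothetical CFG): with -fnon-call-exceptions, a block
   whose last statement is a mergeable store and which falls through to
   the textually next block is reported as BB_EXTENDED_VALID even if it
   contains a single such store, so the caller keeps the open chains
   alive into the fallthru successor instead of processing them at the
   end of the block.  */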
4943
4944 /* Entry point for the pass. Go over each basic block recording chains of
4945 immediate stores. Upon encountering a terminating statement (as defined
4946 by stmt_terminates_chain_p) process the recorded stores and emit the widened
4947 variants. */
4948
4949 unsigned int
4950 pass_store_merging::execute (function *fun)
4951 {
4952 basic_block bb;
4953 hash_set<gimple *> orig_stmts;
4954 bool changed = false, open_chains = false;
4955
4956 /* If the function can throw and catch non-call exceptions, we'll be trying
4957 to merge stores across different basic blocks so we need to first unsplit
4958 the EH edges in order to streamline the CFG of the function. */
4959 if (cfun->can_throw_non_call_exceptions && cfun->eh)
4960 unsplit_eh_edges ();
4961
4962 calculate_dominance_info (CDI_DOMINATORS);
4963
4964 FOR_EACH_BB_FN (bb, fun)
4965 {
4966 const basic_block_status bb_status = get_status_for_store_merging (bb);
4967 gimple_stmt_iterator gsi;
4968
4969 if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
4970 {
4971 changed |= terminate_and_process_all_chains ();
4972 open_chains = false;
4973 }
4974
4975 if (bb_status == BB_INVALID)
4976 continue;
4977
4978 if (dump_file && (dump_flags & TDF_DETAILS))
4979 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
4980
4981 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4982 {
4983 gimple *stmt = gsi_stmt (gsi);
4984
4985 if (is_gimple_debug (stmt))
4986 continue;
4987
4988 if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
4989 {
4990 /* Terminate all chains. */
4991 if (dump_file && (dump_flags & TDF_DETAILS))
4992 fprintf (dump_file, "Volatile access terminates "
4993 "all chains\n");
4994 changed |= terminate_and_process_all_chains ();
4995 open_chains = false;
4996 continue;
4997 }
4998
4999 if (store_valid_for_store_merging_p (stmt))
5000 changed |= process_store (stmt);
5001 else
5002 changed |= terminate_all_aliasing_chains (NULL, stmt);
5003 }
5004
5005 if (bb_status == BB_EXTENDED_VALID)
5006 open_chains = true;
5007 else
5008 {
5009 changed |= terminate_and_process_all_chains ();
5010 open_chains = false;
5011 }
5012 }
5013
5014 if (open_chains)
5015 changed |= terminate_and_process_all_chains ();
5016
5017 /* If the function can throw and catch non-call exceptions and something
5018 changed during the pass, then the CFG has (very likely) changed too. */
5019 if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
5020 {
5021 free_dominance_info (CDI_DOMINATORS);
5022 return TODO_cleanup_cfg;
5023 }
5024
5025 return 0;
5026 }
5027
5028 } // anon namespace
5029
5030 /* Construct and return a store merging pass object. */
5031
5032 gimple_opt_pass *
5033 make_pass_store_merging (gcc::context *ctxt)
5034 {
5035 return new pass_store_merging (ctxt);
5036 }
5037
5038 #if CHECKING_P
5039
5040 namespace selftest {
5041
5042 /* Selftests for store merging helpers. */
5043
5044 /* Assert that all elements of the byte arrays X and Y, both of length N,
5045 are equal. */
5046
5047 static void
5048 verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
5049 {
5050 for (unsigned int i = 0; i < n; i++)
5051 {
5052 if (x[i] != y[i])
5053 {
5054 fprintf (stderr, "Arrays do not match. X:\n");
5055 dump_char_array (stderr, x, n);
5056 fprintf (stderr, "Y:\n");
5057 dump_char_array (stderr, y, n);
5058 }
5059 ASSERT_EQ (x[i], y[i]);
5060 }
5061 }
5062
5063 /* Test shift_bytes_in_array_left and that it carries bits across between
5064 bytes correctly. */
5065
5066 static void
5067 verify_shift_bytes_in_array_left (void)
5068 {
5069 /* byte 1 | byte 0
5070 00011111 | 11100000. */
5071 unsigned char orig[2] = { 0xe0, 0x1f };
5072 unsigned char in[2];
5073 memcpy (in, orig, sizeof orig);
5074
5075 unsigned char expected[2] = { 0x80, 0x7f };
5076 shift_bytes_in_array_left (in, sizeof (in), 2);
5077 verify_array_eq (in, expected, sizeof (in));
5078
5079 memcpy (in, orig, sizeof orig);
5080 memcpy (expected, orig, sizeof orig);
5081 /* Check that shifting by zero doesn't change anything. */
5082 shift_bytes_in_array_left (in, sizeof (in), 0);
5083 verify_array_eq (in, expected, sizeof (in));
5084
5085 }
5086
5087 /* Test shift_bytes_in_array_right and that it carries bits across between
5088 bytes correctly. */
5089
5090 static void
5091 verify_shift_bytes_in_array_right (void)
5092 {
5093 /* byte 1 | byte 0
5094 00011111 | 11100000. */
5095 unsigned char orig[2] = { 0x1f, 0xe0};
5096 unsigned char in[2];
5097 memcpy (in, orig, sizeof orig);
5098 unsigned char expected[2] = { 0x07, 0xf8};
5099 shift_bytes_in_array_right (in, sizeof (in), 2);
5100 verify_array_eq (in, expected, sizeof (in));
5101
5102 memcpy (in, orig, sizeof orig);
5103 memcpy (expected, orig, sizeof orig);
5104 /* Check that shifting by zero doesn't change anything. */
5105 shift_bytes_in_array_right (in, sizeof (in), 0);
5106 verify_array_eq (in, expected, sizeof (in));
5107 }
5108
5109 /* Test clear_bit_region that it clears exactly the bits asked and
5110 nothing more. */
5111
5112 static void
5113 verify_clear_bit_region (void)
5114 {
5115 /* Start with all bits set and test clearing various patterns in them. */
5116 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5117 unsigned char in[3];
5118 unsigned char expected[3];
5119 memcpy (in, orig, sizeof in);
5120
5121 /* Check zeroing out all the bits. */
5122 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
5123 expected[0] = expected[1] = expected[2] = 0;
5124 verify_array_eq (in, expected, sizeof in);
5125
5126 memcpy (in, orig, sizeof in);
5127 /* Leave the first and last bits intact. */
5128 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
5129 expected[0] = 0x1;
5130 expected[1] = 0;
5131 expected[2] = 0x80;
5132 verify_array_eq (in, expected, sizeof in);
5133 }
5134
5135 /* Test clear_bit_region_be that it clears exactly the bits asked and
5136 nothing more. */
5137
5138 static void
5139 verify_clear_bit_region_be (void)
5140 {
5141 /* Start with all bits set and test clearing various patterns in them. */
5142 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5143 unsigned char in[3];
5144 unsigned char expected[3];
5145 memcpy (in, orig, sizeof in);
5146
5147 /* Check zeroing out all the bits. */
5148 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
5149 expected[0] = expected[1] = expected[2] = 0;
5150 verify_array_eq (in, expected, sizeof in);
5151
5152 memcpy (in, orig, sizeof in);
5153 /* Leave the first and last bits intact. */
5154 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
5155 expected[0] = 0x80;
5156 expected[1] = 0;
5157 expected[2] = 0x1;
5158 verify_array_eq (in, expected, sizeof in);
5159 }
5160
5161
5162 /* Run all of the selftests within this file. */
5163
5164 void
5165 store_merging_c_tests (void)
5166 {
5167 verify_shift_bytes_in_array_left ();
5168 verify_shift_bytes_in_array_right ();
5169 verify_clear_bit_region ();
5170 verify_clear_bit_region_be ();
5171 }
5172
5173 } // namespace selftest
5174 #endif /* CHECKING_P. */