gcc/trans-mem.c
1 /* Passes for transactional memory support.
2 Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tree.h"
24 #include "gimple.h"
25 #include "tree-flow.h"
26 #include "tree-pass.h"
27 #include "tree-inline.h"
28 #include "diagnostic-core.h"
29 #include "demangle.h"
30 #include "output.h"
31 #include "trans-mem.h"
32 #include "params.h"
33 #include "target.h"
34 #include "langhooks.h"
35 #include "gimple-pretty-print.h"
36 #include "cfgloop.h"
37
38
39 #define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
40 #define PROB_ALWAYS (REG_BR_PROB_BASE)
41
42 #define A_RUNINSTRUMENTEDCODE 0x0001
43 #define A_RUNUNINSTRUMENTEDCODE 0x0002
44 #define A_SAVELIVEVARIABLES 0x0004
45 #define A_RESTORELIVEVARIABLES 0x0008
46 #define A_ABORTTRANSACTION 0x0010
47
48 #define AR_USERABORT 0x0001
49 #define AR_USERRETRY 0x0002
50 #define AR_TMCONFLICT 0x0004
51 #define AR_EXCEPTIONBLOCKABORT 0x0008
52 #define AR_OUTERABORT 0x0010
53
54 #define MODE_SERIALIRREVOCABLE 0x0000
55
56
57 /* The representation of a transaction changes several times during the
58 lowering process. In the beginning, in the front-end we have the
59 GENERIC tree TRANSACTION_EXPR. For example,
60
61 __transaction {
62 local++;
63 if (++global == 10)
64 __tm_abort;
65 }
66
67 During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
68 trivially replaced with a GIMPLE_TRANSACTION node.
69
70 During pass_lower_tm, we examine the body of transactions looking
71 for aborts. Transactions that do not contain an abort may be
72 merged into an outer transaction. We also add a TRY-FINALLY node
73 to arrange for the transaction to be committed on any exit.
74
75 [??? Think about how this arrangement affects throw-with-commit
76 and throw-with-abort operations. In this case we want the TRY to
77 handle gotos, but not to catch any exceptions because the transaction
78 will already be closed.]
79
80 GIMPLE_TRANSACTION [label=NULL] {
81 try {
82 local = local + 1;
83 t0 = global;
84 t1 = t0 + 1;
85 global = t1;
86 if (t1 == 10)
87 __builtin___tm_abort ();
88 } finally {
89 __builtin___tm_commit ();
90 }
91 }
92
93 During pass_lower_eh, we create EH regions for the transactions,
94 intermixed with the regular EH stuff. This gives us a nice persistent
95 mapping (all the way through rtl) from transactional memory operation
96 back to the transaction, which allows us to get the abnormal edges
97 correct to model transaction aborts and restarts:
98
99 GIMPLE_TRANSACTION [label=over]
100 local = local + 1;
101 t0 = global;
102 t1 = t0 + 1;
103 global = t1;
104 if (t1 == 10)
105 __builtin___tm_abort ();
106 __builtin___tm_commit ();
107 over:
108
109 This is the end of all_lowering_passes, and so is what is present
110 during the IPA passes, and through all of the optimization passes.
111
112 During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
113 functions and mark functions for cloning.
114
115 At the end of gimple optimization, before exiting SSA form,
116 pass_tm_edges replaces statements that perform transactional
117 memory operations with the appropriate TM builtins, and swap
118 out function calls with their transactional clones. At this
119 point we introduce the abnormal transaction restart edges and
120 complete lowering of the GIMPLE_TRANSACTION node.
121
122 x = __builtin___tm_start (MAY_ABORT);
123 eh_label:
124 if (x & abort_transaction)
125 goto over;
126 local = local + 1;
127 t0 = __builtin___tm_load (global);
128 t1 = t0 + 1;
129 __builtin___tm_store (&global, t1);
130 if (t1 == 10)
131 __builtin___tm_abort ();
132 __builtin___tm_commit ();
133 over:
134 */
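
/* For orientation, a minimal source-level sketch of the kind of input this
   file handles, assuming the -fgnu-tm syntax accepted by the C/C++ front
   ends (the exact spelling of the keywords is a front-end matter):

     // compile with: gcc -fgnu-tm example.c
     int global;
     void f (int local)
     {
       __transaction_atomic {
         local++;
         if (++global == 10)
           __transaction_cancel;
       }
     }

   This corresponds to the TRANSACTION_EXPR / __tm_abort forms shown in the
   overview above.  */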
135
136 \f
137 /* Return the attributes we want to examine for X, or NULL if it's not
138    something we examine.  We look at function types, but also allow
139    pointers to function types and function decls, peeking through.  */
140
141 static tree
142 get_attrs_for (const_tree x)
143 {
144 switch (TREE_CODE (x))
145 {
146 case FUNCTION_DECL:
147 return TYPE_ATTRIBUTES (TREE_TYPE (x));
148 break;
149
150 default:
151 if (TYPE_P (x))
152 return NULL;
153 x = TREE_TYPE (x);
154 if (TREE_CODE (x) != POINTER_TYPE)
155 return NULL;
156 /* FALLTHRU */
157
158 case POINTER_TYPE:
159 x = TREE_TYPE (x);
160 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
161 return NULL;
162 /* FALLTHRU */
163
164 case FUNCTION_TYPE:
165 case METHOD_TYPE:
166 return TYPE_ATTRIBUTES (x);
167 }
168 }
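
/* As an illustration of the shapes the attribute predicates below accept
   (hypothetical declarations, not part of this file):

     __attribute__((transaction_safe))     void safe_fn (void);
     __attribute__((transaction_callable)) void callable_fn (void);
     __attribute__((transaction_unsafe))   void unsafe_fn (void);
     void (*fp) (void) __attribute__((transaction_safe));

   For the FUNCTION_DECLs, get_attrs_for returns the attributes of the
   function type; for FP it peeks through the pointer type first.  */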
169
170 /* Return true if X has been marked TM_PURE. */
171
172 bool
173 is_tm_pure (const_tree x)
174 {
175 unsigned flags;
176
177 switch (TREE_CODE (x))
178 {
179 case FUNCTION_DECL:
180 case FUNCTION_TYPE:
181 case METHOD_TYPE:
182 break;
183
184 default:
185 if (TYPE_P (x))
186 return false;
187 x = TREE_TYPE (x);
188 if (TREE_CODE (x) != POINTER_TYPE)
189 return false;
190 /* FALLTHRU */
191
192 case POINTER_TYPE:
193 x = TREE_TYPE (x);
194 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
195 return false;
196 break;
197 }
198
199 flags = flags_from_decl_or_type (x);
200 return (flags & ECF_TM_PURE) != 0;
201 }
202
203 /* Return true if X has been marked TM_IRREVOCABLE. */
204
205 static bool
206 is_tm_irrevocable (tree x)
207 {
208 tree attrs = get_attrs_for (x);
209
210 if (attrs && lookup_attribute ("transaction_unsafe", attrs))
211 return true;
212
213   /* A call to the irrevocable builtin is, by definition,
214      irrevocable.  */
215 if (TREE_CODE (x) == ADDR_EXPR)
216 x = TREE_OPERAND (x, 0);
217 if (TREE_CODE (x) == FUNCTION_DECL
218 && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
219 && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
220 return true;
221
222 return false;
223 }
224
225 /* Return true if X has been marked TM_SAFE. */
226
227 bool
228 is_tm_safe (const_tree x)
229 {
230 if (flag_tm)
231 {
232 tree attrs = get_attrs_for (x);
233 if (attrs)
234 {
235 if (lookup_attribute ("transaction_safe", attrs))
236 return true;
237 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
238 return true;
239 }
240 }
241 return false;
242 }
243
244 /* Return true if CALL is const, or tm_pure. */
245
246 static bool
247 is_tm_pure_call (gimple call)
248 {
249 tree fn = gimple_call_fn (call);
250
251 if (TREE_CODE (fn) == ADDR_EXPR)
252 {
253 fn = TREE_OPERAND (fn, 0);
254 gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
255 }
256 else
257 fn = TREE_TYPE (fn);
258
259 return is_tm_pure (fn);
260 }
261
262 /* Return true if X has been marked TM_CALLABLE. */
263
264 static bool
265 is_tm_callable (tree x)
266 {
267 tree attrs = get_attrs_for (x);
268 if (attrs)
269 {
270 if (lookup_attribute ("transaction_callable", attrs))
271 return true;
272 if (lookup_attribute ("transaction_safe", attrs))
273 return true;
274 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
275 return true;
276 }
277 return false;
278 }
279
280 /* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
281
282 bool
283 is_tm_may_cancel_outer (tree x)
284 {
285 tree attrs = get_attrs_for (x);
286 if (attrs)
287 return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
288 return false;
289 }
290
291 /* Return true for built-in functions that "end" a transaction.  */
292
293 bool
294 is_tm_ending_fndecl (tree fndecl)
295 {
296 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
297 switch (DECL_FUNCTION_CODE (fndecl))
298 {
299 case BUILT_IN_TM_COMMIT:
300 case BUILT_IN_TM_COMMIT_EH:
301 case BUILT_IN_TM_ABORT:
302 case BUILT_IN_TM_IRREVOCABLE:
303 return true;
304 default:
305 break;
306 }
307
308 return false;
309 }
310
311 /* Return true if STMT is a TM load. */
312
313 static bool
314 is_tm_load (gimple stmt)
315 {
316 tree fndecl;
317
318 if (gimple_code (stmt) != GIMPLE_CALL)
319 return false;
320
321 fndecl = gimple_call_fndecl (stmt);
322 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
323 && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
324 }
325
326 /* Same as above, but for simple TM loads, that is, not the
327    after-write, after-read, etc. optimized variants.  */
328
329 static bool
330 is_tm_simple_load (gimple stmt)
331 {
332 tree fndecl;
333
334 if (gimple_code (stmt) != GIMPLE_CALL)
335 return false;
336
337 fndecl = gimple_call_fndecl (stmt);
338 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
339 {
340 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
341 return (fcode == BUILT_IN_TM_LOAD_1
342 || fcode == BUILT_IN_TM_LOAD_2
343 || fcode == BUILT_IN_TM_LOAD_4
344 || fcode == BUILT_IN_TM_LOAD_8
345 || fcode == BUILT_IN_TM_LOAD_FLOAT
346 || fcode == BUILT_IN_TM_LOAD_DOUBLE
347 || fcode == BUILT_IN_TM_LOAD_LDOUBLE
348 || fcode == BUILT_IN_TM_LOAD_M64
349 || fcode == BUILT_IN_TM_LOAD_M128
350 || fcode == BUILT_IN_TM_LOAD_M256);
351 }
352 return false;
353 }
354
355 /* Return true if STMT is a TM store. */
356
357 static bool
358 is_tm_store (gimple stmt)
359 {
360 tree fndecl;
361
362 if (gimple_code (stmt) != GIMPLE_CALL)
363 return false;
364
365 fndecl = gimple_call_fndecl (stmt);
366 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
367 && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
368 }
369
370 /* Same as above, but for simple TM stores, that is, not the
371    after-write, after-read, etc. optimized variants.  */
372
373 static bool
374 is_tm_simple_store (gimple stmt)
375 {
376 tree fndecl;
377
378 if (gimple_code (stmt) != GIMPLE_CALL)
379 return false;
380
381 fndecl = gimple_call_fndecl (stmt);
382 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
383 {
384 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
385 return (fcode == BUILT_IN_TM_STORE_1
386 || fcode == BUILT_IN_TM_STORE_2
387 || fcode == BUILT_IN_TM_STORE_4
388 || fcode == BUILT_IN_TM_STORE_8
389 || fcode == BUILT_IN_TM_STORE_FLOAT
390 || fcode == BUILT_IN_TM_STORE_DOUBLE
391 || fcode == BUILT_IN_TM_STORE_LDOUBLE
392 || fcode == BUILT_IN_TM_STORE_M64
393 || fcode == BUILT_IN_TM_STORE_M128
394 || fcode == BUILT_IN_TM_STORE_M256);
395 }
396 return false;
397 }
398
399 /* Return true if FNDECL is BUILT_IN_TM_ABORT. */
400
401 static bool
402 is_tm_abort (tree fndecl)
403 {
404 return (fndecl
405 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
406 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
407 }
408
409 /* Build a GENERIC tree for a user abort. This is called by front ends
410 while transforming the __tm_abort statement. */
411
412 tree
413 build_tm_abort_call (location_t loc, bool is_outer)
414 {
415 return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
416 build_int_cst (integer_type_node,
417 AR_USERABORT
418 | (is_outer ? AR_OUTERABORT : 0)));
419 }
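
/* An illustrative sketch of the encoding built above, using the AR_*
   constants defined at the top of this file:

     build_tm_abort_call (loc, false)  ==>  argument AR_USERABORT                  (0x0001)
     build_tm_abort_call (loc, true)   ==>  argument AR_USERABORT | AR_OUTERABORT  (0x0011)

   The callee is whatever decl BUILT_IN_TM_ABORT resolves to for the TM
   runtime in use.  */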
420
421 /* Common gating function for several of the TM passes.  */
422
423 static bool
424 gate_tm (void)
425 {
426 return flag_tm;
427 }
428 \f
429 /* Map for arbitrary function replacement under TM, as created
430 by the tm_wrap attribute. */
431
432 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
433 htab_t tm_wrap_map;
434
435 void
436 record_tm_replacement (tree from, tree to)
437 {
438 struct tree_map **slot, *h;
439
440 /* Do not inline wrapper functions that will get replaced in the TM
441 pass.
442
443 Suppose you have foo() that will get replaced into tmfoo(). Make
444 sure the inliner doesn't try to outsmart us and inline foo()
445 before we get a chance to do the TM replacement. */
446 DECL_UNINLINABLE (from) = 1;
447
448 if (tm_wrap_map == NULL)
449 tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);
450
451 h = ggc_alloc_tree_map ();
452 h->hash = htab_hash_pointer (from);
453 h->base.from = from;
454 h->to = to;
455
456 slot = (struct tree_map **)
457 htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
458 *slot = h;
459 }
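
/* A hedged sketch of how entries typically reach TM_WRAP_MAP.  Given
   hypothetical user declarations using the transaction_wrap attribute:

     void tm_fn (int) __attribute__((transaction_safe));
     void fn (int) __attribute__((transaction_wrap (tm_fn)));

   the front end is expected to call record_tm_replacement (fn, tm_fn), so
   calls to FN inside transactions are later redirected to TM_FN, and FN
   itself is kept uninlinable as noted above.  */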
460
461 /* Return a TM-aware replacement function for DECL. */
462
463 static tree
464 find_tm_replacement_function (tree fndecl)
465 {
466 if (tm_wrap_map)
467 {
468 struct tree_map *h, in;
469
470 in.base.from = fndecl;
471 in.hash = htab_hash_pointer (fndecl);
472 h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
473 if (h)
474 return h->to;
475 }
476
477 /* ??? We may well want TM versions of most of the common <string.h>
478      functions.  For now, we already have the following defined.  */
479 /* Adjust expand_call_tm() attributes as necessary for the cases
480 handled here: */
481 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
482 switch (DECL_FUNCTION_CODE (fndecl))
483 {
484 case BUILT_IN_MEMCPY:
485 return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
486 case BUILT_IN_MEMMOVE:
487 return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
488 case BUILT_IN_MEMSET:
489 return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
490 default:
491 return NULL;
492 }
493
494 return NULL;
495 }
496
497 /* When appropriate, record TM replacement for memory allocation functions.
498
499 FROM is the FNDECL to wrap. */
500 void
501 tm_malloc_replacement (tree from)
502 {
503 const char *str;
504 tree to;
505
506 if (TREE_CODE (from) != FUNCTION_DECL)
507 return;
508
509 /* If we have a previous replacement, the user must be explicitly
510 wrapping malloc/calloc/free. They better know what they're
511 doing... */
512 if (find_tm_replacement_function (from))
513 return;
514
515 str = IDENTIFIER_POINTER (DECL_NAME (from));
516
517 if (!strcmp (str, "malloc"))
518 to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
519 else if (!strcmp (str, "calloc"))
520 to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
521 else if (!strcmp (str, "free"))
522 to = builtin_decl_explicit (BUILT_IN_TM_FREE);
523 else
524 return;
525
526 TREE_NOTHROW (to) = 0;
527
528 record_tm_replacement (from, to);
529 }
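
/* For example, given the usual C library declaration

     extern void *malloc (size_t);

   this records BUILT_IN_TM_MALLOC as the transactional replacement for
   MALLOC (unless the user already wrapped it explicitly above), so the TM
   runtime can track the allocation across a transaction abort.  */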
530 \f
531 /* Diagnostics for tm_safe functions/regions. Called by the front end
532 once we've lowered the function to high-gimple. */
533
534 /* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
535 Process exactly one statement. WI->INFO is set to non-null when in
536 the context of a tm_safe function, and null for a __transaction block. */
537
538 #define DIAG_TM_OUTER 1
539 #define DIAG_TM_SAFE 2
540 #define DIAG_TM_RELAXED 4
541
542 struct diagnose_tm
543 {
544 unsigned int summary_flags : 8;
545 unsigned int block_flags : 8;
546 unsigned int func_flags : 8;
547 unsigned int saw_volatile : 1;
548 gimple stmt;
549 };
550
551 /* Tree callback function for diagnose_tm pass. */
552
553 static tree
554 diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
555 void *data)
556 {
557 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
558 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
559 enum tree_code code = TREE_CODE (*tp);
560
561 if ((code == VAR_DECL
562 || code == RESULT_DECL
563 || code == PARM_DECL)
564 && d->block_flags & (DIAG_TM_SAFE | DIAG_TM_RELAXED)
565 && TREE_THIS_VOLATILE (TREE_TYPE (*tp))
566 && !d->saw_volatile)
567 {
568 d->saw_volatile = 1;
569 error_at (gimple_location (d->stmt),
570 "invalid volatile use of %qD inside transaction",
571 *tp);
572 }
573
574 return NULL_TREE;
575 }
576
577 static tree
578 diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
579 struct walk_stmt_info *wi)
580 {
581 gimple stmt = gsi_stmt (*gsi);
582 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
583
584 /* Save stmt for use in leaf analysis. */
585 d->stmt = stmt;
586
587 switch (gimple_code (stmt))
588 {
589 case GIMPLE_CALL:
590 {
591 tree fn = gimple_call_fn (stmt);
592
593 if ((d->summary_flags & DIAG_TM_OUTER) == 0
594 && is_tm_may_cancel_outer (fn))
595 error_at (gimple_location (stmt),
596 "%<transaction_may_cancel_outer%> function call not within"
597 " outer transaction or %<transaction_may_cancel_outer%>");
598
599 if (d->summary_flags & DIAG_TM_SAFE)
600 {
601 bool is_safe, direct_call_p;
602 tree replacement;
603
604 if (TREE_CODE (fn) == ADDR_EXPR
605 && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
606 {
607 direct_call_p = true;
608 replacement = TREE_OPERAND (fn, 0);
609 replacement = find_tm_replacement_function (replacement);
610 if (replacement)
611 fn = replacement;
612 }
613 else
614 {
615 direct_call_p = false;
616 replacement = NULL_TREE;
617 }
618
619 if (is_tm_safe_or_pure (fn))
620 is_safe = true;
621 else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
622 {
623 /* A function explicitly marked transaction_callable as
624 opposed to transaction_safe is being defined to be
625 unsafe as part of its ABI, regardless of its contents. */
626 is_safe = false;
627 }
628 else if (direct_call_p)
629 {
630 if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
631 is_safe = true;
632 else if (replacement)
633 {
634 /* ??? At present we've been considering replacements
635 merely transaction_callable, and therefore might
636 enter irrevocable. The tm_wrap attribute has not
637 yet made it into the new language spec. */
638 is_safe = false;
639 }
640 else
641 {
642 /* ??? Diagnostics for unmarked direct calls moved into
643 the IPA pass. Section 3.2 of the spec details how
644 functions not marked should be considered "implicitly
645 safe" based on having examined the function body. */
646 is_safe = true;
647 }
648 }
649 else
650 {
651 /* An unmarked indirect call. Consider it unsafe even
652 though optimization may yet figure out how to inline. */
653 is_safe = false;
654 }
655
656 if (!is_safe)
657 {
658 if (TREE_CODE (fn) == ADDR_EXPR)
659 fn = TREE_OPERAND (fn, 0);
660 if (d->block_flags & DIAG_TM_SAFE)
661 {
662 if (direct_call_p)
663 error_at (gimple_location (stmt),
664 "unsafe function call %qD within "
665 "atomic transaction", fn);
666 else
667 {
668 if (!DECL_P (fn) || DECL_NAME (fn))
669 error_at (gimple_location (stmt),
670 "unsafe function call %qE within "
671 "atomic transaction", fn);
672 else
673 error_at (gimple_location (stmt),
674 "unsafe indirect function call within "
675 "atomic transaction");
676 }
677 }
678 else
679 {
680 if (direct_call_p)
681 error_at (gimple_location (stmt),
682 "unsafe function call %qD within "
683 "%<transaction_safe%> function", fn);
684 else
685 {
686 if (!DECL_P (fn) || DECL_NAME (fn))
687 error_at (gimple_location (stmt),
688 "unsafe function call %qE within "
689 "%<transaction_safe%> function", fn);
690 else
691 error_at (gimple_location (stmt),
692 "unsafe indirect function call within "
693 "%<transaction_safe%> function");
694 }
695 }
696 }
697 }
698 }
699 break;
700
701 case GIMPLE_ASM:
702 /* ??? We ought to come up with a way to add attributes to
703 asm statements, and then add "transaction_safe" to it.
704 Either that or get the language spec to resurrect __tm_waiver. */
705 if (d->block_flags & DIAG_TM_SAFE)
706 error_at (gimple_location (stmt),
707 "asm not allowed in atomic transaction");
708 else if (d->func_flags & DIAG_TM_SAFE)
709 error_at (gimple_location (stmt),
710 "asm not allowed in %<transaction_safe%> function");
711 break;
712
713 case GIMPLE_TRANSACTION:
714 {
715 unsigned char inner_flags = DIAG_TM_SAFE;
716
717 if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
718 {
719 if (d->block_flags & DIAG_TM_SAFE)
720 error_at (gimple_location (stmt),
721 "relaxed transaction in atomic transaction");
722 else if (d->func_flags & DIAG_TM_SAFE)
723 error_at (gimple_location (stmt),
724 "relaxed transaction in %<transaction_safe%> function");
725 inner_flags = DIAG_TM_RELAXED;
726 }
727 else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
728 {
729 if (d->block_flags)
730 error_at (gimple_location (stmt),
731 "outer transaction in transaction");
732 else if (d->func_flags & DIAG_TM_OUTER)
733 error_at (gimple_location (stmt),
734 "outer transaction in "
735 "%<transaction_may_cancel_outer%> function");
736 else if (d->func_flags & DIAG_TM_SAFE)
737 error_at (gimple_location (stmt),
738 "outer transaction in %<transaction_safe%> function");
739 inner_flags |= DIAG_TM_OUTER;
740 }
741
742 *handled_ops_p = true;
743 if (gimple_transaction_body (stmt))
744 {
745 struct walk_stmt_info wi_inner;
746 struct diagnose_tm d_inner;
747
748 memset (&d_inner, 0, sizeof (d_inner));
749 d_inner.func_flags = d->func_flags;
750 d_inner.block_flags = d->block_flags | inner_flags;
751 d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
752
753 memset (&wi_inner, 0, sizeof (wi_inner));
754 wi_inner.info = &d_inner;
755
756 walk_gimple_seq (gimple_transaction_body (stmt),
757 diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
758 }
759 }
760 break;
761
762 default:
763 break;
764 }
765
766 return NULL_TREE;
767 }
768
769 static unsigned int
770 diagnose_tm_blocks (void)
771 {
772 struct walk_stmt_info wi;
773 struct diagnose_tm d;
774
775 memset (&d, 0, sizeof (d));
776 if (is_tm_may_cancel_outer (current_function_decl))
777 d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
778 else if (is_tm_safe (current_function_decl))
779 d.func_flags = DIAG_TM_SAFE;
780 d.summary_flags = d.func_flags;
781
782 memset (&wi, 0, sizeof (wi));
783 wi.info = &d;
784
785 walk_gimple_seq (gimple_body (current_function_decl),
786 diagnose_tm_1, diagnose_tm_1_op, &wi);
787
788 return 0;
789 }
790
791 struct gimple_opt_pass pass_diagnose_tm_blocks =
792 {
793 {
794 GIMPLE_PASS,
795 "*diagnose_tm_blocks", /* name */
796 gate_tm, /* gate */
797 diagnose_tm_blocks, /* execute */
798 NULL, /* sub */
799 NULL, /* next */
800 0, /* static_pass_number */
801 TV_TRANS_MEM, /* tv_id */
802 PROP_gimple_any, /* properties_required */
803 0, /* properties_provided */
804 0, /* properties_destroyed */
805 0, /* todo_flags_start */
806 0, /* todo_flags_finish */
807 }
808 };
809 \f
810 /* Instead of instrumenting thread private memory, we save the
811 addresses in a log which we later use to save/restore the addresses
812 upon transaction start/restart.
813
814 The log is keyed by address, where each element contains individual
815 statements among different code paths that perform the store.
816
817 This log is later used to generate either plain save/restore of the
818 addresses upon transaction start/restart, or calls to the ITM_L*
819 logging functions.
820
821 So for something like:
822
823 struct large { int x[1000]; };
824 struct large lala = { 0 };
825 __transaction {
826 lala.x[i] = 123;
827 ...
828 }
829
830 We can either save/restore:
831
832 lala = { 0 };
833 trxn = _ITM_startTransaction ();
834 if (trxn & a_saveLiveVariables)
835 tmp_lala1 = lala.x[i];
836 else if (a & a_restoreLiveVariables)
837 lala.x[i] = tmp_lala1;
838
839 or use the logging functions:
840
841 lala = { 0 };
842 trxn = _ITM_startTransaction ();
843 _ITM_LU4 (&lala.x[i]);
844
845 Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
846 far up the dominator tree to shadow all of the writes to a given
847 location (thus reducing the total number of logging calls), but not
848 so high as to be called on a path that does not perform a
849 write. */
850
851 /* One individual log entry. We may have multiple statements for the
852    same location if neither dominates the other (on different
853 execution paths). */
854 typedef struct tm_log_entry
855 {
856 /* Address to save. */
857 tree addr;
858 /* Entry block for the transaction this address occurs in. */
859 basic_block entry_block;
860 /* Dominating statements the store occurs in. */
861 gimple_vec stmts;
862 /* Initially, while we are building the log, we place a nonzero
863 value here to mean that this address *will* be saved with a
864 save/restore sequence. Later, when generating the save sequence
865 we place the SSA temp generated here. */
866 tree save_var;
867 } *tm_log_entry_t;
868
869 /* The actual log. */
870 static htab_t tm_log;
871
872 /* Addresses to log with a save/restore sequence. These should be in
873 dominator order. */
874 static VEC(tree,heap) *tm_log_save_addresses;
875
876 /* Map for an SSA_NAME originally pointing to a non-aliased new piece
877    of memory (malloc, alloca, etc.).  */
878 static htab_t tm_new_mem_hash;
879
880 enum thread_memory_type
881 {
882 mem_non_local = 0,
883 mem_thread_local,
884 mem_transaction_local,
885 mem_max
886 };
887
888 typedef struct tm_new_mem_map
889 {
890 /* SSA_NAME being dereferenced. */
891 tree val;
892 enum thread_memory_type local_new_memory;
893 } tm_new_mem_map_t;
894
895 /* Htab support. Return hash value for a `tm_log_entry'. */
896 static hashval_t
897 tm_log_hash (const void *p)
898 {
899 const struct tm_log_entry *log = (const struct tm_log_entry *) p;
900 return iterative_hash_expr (log->addr, 0);
901 }
902
903 /* Htab support. Return true if two log entries are the same. */
904 static int
905 tm_log_eq (const void *p1, const void *p2)
906 {
907 const struct tm_log_entry *log1 = (const struct tm_log_entry *) p1;
908 const struct tm_log_entry *log2 = (const struct tm_log_entry *) p2;
909
910 /* FIXME:
911
912 rth: I suggest that we get rid of the component refs etc.
913 I.e. resolve the reference to base + offset.
914
915 We may need to actually finish a merge with mainline for this,
916 since we'd like to be presented with Richi's MEM_REF_EXPRs more
917 often than not. But in the meantime your tm_log_entry could save
918 the results of get_inner_reference.
919
920 See: g++.dg/tm/pr46653.C
921 */
922
923 /* Special case plain equality because operand_equal_p() below will
924 return FALSE if the addresses are equal but they have
925 side-effects (e.g. a volatile address). */
926 if (log1->addr == log2->addr)
927 return true;
928
929 return operand_equal_p (log1->addr, log2->addr, 0);
930 }
931
932 /* Htab support. Free one tm_log_entry. */
933 static void
934 tm_log_free (void *p)
935 {
936 struct tm_log_entry *lp = (struct tm_log_entry *) p;
937 VEC_free (gimple, heap, lp->stmts);
938 free (lp);
939 }
940
941 /* Initialize logging data structures. */
942 static void
943 tm_log_init (void)
944 {
945 tm_log = htab_create (10, tm_log_hash, tm_log_eq, tm_log_free);
946 tm_new_mem_hash = htab_create (5, struct_ptr_hash, struct_ptr_eq, free);
947 tm_log_save_addresses = VEC_alloc (tree, heap, 5);
948 }
949
950 /* Free logging data structures. */
951 static void
952 tm_log_delete (void)
953 {
954 htab_delete (tm_log);
955 htab_delete (tm_new_mem_hash);
956 VEC_free (tree, heap, tm_log_save_addresses);
957 }
958
959 /* Return true if MEM is a transaction-invariant address for the TM
960 region starting at REGION_ENTRY_BLOCK. */
961 static bool
962 transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
963 {
964 if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
965 && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
966 {
967 basic_block def_bb;
968
969 def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
970 return def_bb != region_entry_block
971 && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
972 }
973
974 mem = strip_invariant_refs (mem);
975 return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
976 }
977
978 /* Given an address ADDR in STMT, find it in the memory log or add it,
979 making sure to keep only the addresses highest in the dominator
980 tree.
981
982 ENTRY_BLOCK is the entry_block for the transaction.
983
984 If we find the address in the log, make sure it's either the same
985 address, or an equivalent one that dominates ADDR.
986
987 If we find the address, but neither ADDR dominates the found
988 address, nor the found one dominates ADDR, we're on different
989 execution paths. Add it.
990
991 If known, ENTRY_BLOCK is the entry block for the region, otherwise
992 NULL. */
993 static void
994 tm_log_add (basic_block entry_block, tree addr, gimple stmt)
995 {
996 void **slot;
997 struct tm_log_entry l, *lp;
998
999 l.addr = addr;
1000 slot = htab_find_slot (tm_log, &l, INSERT);
1001 if (!*slot)
1002 {
1003 tree type = TREE_TYPE (addr);
1004
1005 lp = XNEW (struct tm_log_entry);
1006 lp->addr = addr;
1007 *slot = lp;
1008
1009 /* Small invariant addresses can be handled as save/restores. */
1010 if (entry_block
1011 && transaction_invariant_address_p (lp->addr, entry_block)
1012 && TYPE_SIZE_UNIT (type) != NULL
1013 && host_integerp (TYPE_SIZE_UNIT (type), 1)
1014 && (tree_low_cst (TYPE_SIZE_UNIT (type), 1)
1015 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
1016 /* We must be able to copy this type normally. I.e., no
1017 special constructors and the like. */
1018 && !TREE_ADDRESSABLE (type))
1019 {
1020 lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
1021 add_referenced_var (lp->save_var);
1022 lp->stmts = NULL;
1023 lp->entry_block = entry_block;
1024 /* Save addresses separately in dominator order so we don't
1025 get confused by overlapping addresses in the save/restore
1026 sequence. */
1027 VEC_safe_push (tree, heap, tm_log_save_addresses, lp->addr);
1028 }
1029 else
1030 {
1031 /* Use the logging functions. */
1032 lp->stmts = VEC_alloc (gimple, heap, 5);
1033 VEC_quick_push (gimple, lp->stmts, stmt);
1034 lp->save_var = NULL;
1035 }
1036 }
1037 else
1038 {
1039 size_t i;
1040 gimple oldstmt;
1041
1042 lp = (struct tm_log_entry *) *slot;
1043
1044 /* If we're generating a save/restore sequence, we don't care
1045 about statements. */
1046 if (lp->save_var)
1047 return;
1048
1049 for (i = 0; VEC_iterate (gimple, lp->stmts, i, oldstmt); ++i)
1050 {
1051 if (stmt == oldstmt)
1052 return;
1053 /* We already have a store to the same address, higher up the
1054 dominator tree. Nothing to do. */
1055 if (dominated_by_p (CDI_DOMINATORS,
1056 gimple_bb (stmt), gimple_bb (oldstmt)))
1057 return;
1058 /* We should be processing blocks in dominator tree order. */
1059 gcc_assert (!dominated_by_p (CDI_DOMINATORS,
1060 gimple_bb (oldstmt), gimple_bb (stmt)));
1061 }
1062 /* Store is on a different code path. */
1063 VEC_safe_push (gimple, heap, lp->stmts, stmt);
1064 }
1065 }
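
/* A small illustration of the dominator filtering above (sketch only;
   block labels are hypothetical):

     bb1:  *p = 1;      // first store seen for this address, recorded
     bb2:  if (cond)
     bb3:    *p = 2;    // dominated by bb1's store: nothing to add
     bb4:  else
             *p = 3;    // likewise dominated by bb1: nothing to add

   Had the only stores been those in bb3 and bb4, neither would dominate
   the other, and both statements would be kept in LP->STMTS so that each
   path receives its own logging call.  */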
1066
1067 /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1068 result, insert the new statements before GSI. */
1069
1070 static tree
1071 gimplify_addr (gimple_stmt_iterator *gsi, tree x)
1072 {
1073 if (TREE_CODE (x) == TARGET_MEM_REF)
1074 x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
1075 else
1076 x = build_fold_addr_expr (x);
1077 return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
1078 }
1079
1080 /* Instrument one address with the logging functions.
1081 ADDR is the address to save.
1082 STMT is the statement before which to place it. */
1083 static void
1084 tm_log_emit_stmt (tree addr, gimple stmt)
1085 {
1086 tree type = TREE_TYPE (addr);
1087 tree size = TYPE_SIZE_UNIT (type);
1088 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1089 gimple log;
1090 enum built_in_function code = BUILT_IN_TM_LOG;
1091
1092 if (type == float_type_node)
1093 code = BUILT_IN_TM_LOG_FLOAT;
1094 else if (type == double_type_node)
1095 code = BUILT_IN_TM_LOG_DOUBLE;
1096 else if (type == long_double_type_node)
1097 code = BUILT_IN_TM_LOG_LDOUBLE;
1098 else if (host_integerp (size, 1))
1099 {
1100 unsigned int n = tree_low_cst (size, 1);
1101 switch (n)
1102 {
1103 case 1:
1104 code = BUILT_IN_TM_LOG_1;
1105 break;
1106 case 2:
1107 code = BUILT_IN_TM_LOG_2;
1108 break;
1109 case 4:
1110 code = BUILT_IN_TM_LOG_4;
1111 break;
1112 case 8:
1113 code = BUILT_IN_TM_LOG_8;
1114 break;
1115 default:
1116 code = BUILT_IN_TM_LOG;
1117 if (TREE_CODE (type) == VECTOR_TYPE)
1118 {
1119 if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
1120 code = BUILT_IN_TM_LOG_M64;
1121 else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
1122 code = BUILT_IN_TM_LOG_M128;
1123 else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
1124 code = BUILT_IN_TM_LOG_M256;
1125 }
1126 break;
1127 }
1128 }
1129
1130 addr = gimplify_addr (&gsi, addr);
1131 if (code == BUILT_IN_TM_LOG)
1132 log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
1133 else
1134 log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
1135 gsi_insert_before (&gsi, log, GSI_SAME_STMT);
1136 }
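
/* For example, logging the address of a 4-byte scalar selects the sized
   builtin, while an aggregate of unusual size falls back to the generic
   two-argument form (a rough sketch of the calls emitted above):

     int i;                        ==>  BUILT_IN_TM_LOG_4 (&i)
     struct s { char b[24]; } v;   ==>  BUILT_IN_TM_LOG (&v, 24)
*/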
1137
1138 /* Go through the log and instrument addresses that must be instrumented
1139 with the logging functions. Leave the save/restore addresses for
1140 later. */
1141 static void
1142 tm_log_emit (void)
1143 {
1144 htab_iterator hi;
1145 struct tm_log_entry *lp;
1146
1147 FOR_EACH_HTAB_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
1148 {
1149 size_t i;
1150 gimple stmt;
1151
1152 if (dump_file)
1153 {
1154 fprintf (dump_file, "TM thread private mem logging: ");
1155 print_generic_expr (dump_file, lp->addr, 0);
1156 fprintf (dump_file, "\n");
1157 }
1158
1159 if (lp->save_var)
1160 {
1161 if (dump_file)
1162 fprintf (dump_file, "DUMPING to variable\n");
1163 continue;
1164 }
1165 else
1166 {
1167 if (dump_file)
1168 fprintf (dump_file, "DUMPING with logging functions\n");
1169 for (i = 0; VEC_iterate (gimple, lp->stmts, i, stmt); ++i)
1170 tm_log_emit_stmt (lp->addr, stmt);
1171 }
1172 }
1173 }
1174
1175 /* Emit the save sequence for the corresponding addresses in the log.
1176 ENTRY_BLOCK is the entry block for the transaction.
1177 BB is the basic block to insert the code in. */
1178 static void
1179 tm_log_emit_saves (basic_block entry_block, basic_block bb)
1180 {
1181 size_t i;
1182 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1183 gimple stmt;
1184 struct tm_log_entry l, *lp;
1185
1186 for (i = 0; i < VEC_length (tree, tm_log_save_addresses); ++i)
1187 {
1188 l.addr = VEC_index (tree, tm_log_save_addresses, i);
1189 lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
1190 gcc_assert (lp->save_var != NULL);
1191
1192 /* We only care about variables in the current transaction. */
1193 if (lp->entry_block != entry_block)
1194 continue;
1195
1196 stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
1197
1198 /* Make sure we can create an SSA_NAME for this type. For
1199 instance, aggregates aren't allowed, in which case the system
1200 will create a VOP for us and everything will just work. */
1201 if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
1202 {
1203 lp->save_var = make_ssa_name (lp->save_var, stmt);
1204 gimple_assign_set_lhs (stmt, lp->save_var);
1205 }
1206
1207 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1208 }
1209 }
1210
1211 /* Emit the restore sequence for the corresponding addresses in the log.
1212 ENTRY_BLOCK is the entry block for the transaction.
1213 BB is the basic block to insert the code in. */
1214 static void
1215 tm_log_emit_restores (basic_block entry_block, basic_block bb)
1216 {
1217 int i;
1218 struct tm_log_entry l, *lp;
1219 gimple_stmt_iterator gsi;
1220 gimple stmt;
1221
1222 for (i = VEC_length (tree, tm_log_save_addresses) - 1; i >= 0; i--)
1223 {
1224 l.addr = VEC_index (tree, tm_log_save_addresses, i);
1225 lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
1226 gcc_assert (lp->save_var != NULL);
1227
1228 /* We only care about variables in the current transaction. */
1229 if (lp->entry_block != entry_block)
1230 continue;
1231
1232 /* Restores are in LIFO order from the saves in case we have
1233 overlaps. */
1234 gsi = gsi_start_bb (bb);
1235
1236 stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
1237 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1238 }
1239 }
1240
1241 /* Emit the checks for performing either a save or a restore sequence.
1242
1243 TRXN_PROP is either A_SAVELIVEVARIABLES or A_RESTORELIVEVARIABLES.
1244
1245 The code sequence is inserted in a new basic block created in
1246 END_BB which is inserted between BEFORE_BB and the destination of
1247 FALLTHRU_EDGE.
1248
1249 STATUS is the return value from _ITM_beginTransaction.
1250 ENTRY_BLOCK is the entry block for the transaction.
1251 EMITF is a callback to emit the actual save/restore code.
1252
1253 The basic block containing the conditional checking for TRXN_PROP
1254 is returned. */
1255 static basic_block
1256 tm_log_emit_save_or_restores (basic_block entry_block,
1257 unsigned trxn_prop,
1258 tree status,
1259 void (*emitf)(basic_block, basic_block),
1260 basic_block before_bb,
1261 edge fallthru_edge,
1262 basic_block *end_bb)
1263 {
1264 basic_block cond_bb, code_bb;
1265 gimple cond_stmt, stmt;
1266 gimple_stmt_iterator gsi;
1267 tree t1, t2;
1268 int old_flags = fallthru_edge->flags;
1269
1270 cond_bb = create_empty_bb (before_bb);
1271 code_bb = create_empty_bb (cond_bb);
1272 *end_bb = create_empty_bb (code_bb);
1273 if (current_loops && before_bb->loop_father)
1274 {
1275 add_bb_to_loop (cond_bb, before_bb->loop_father);
1276 add_bb_to_loop (code_bb, before_bb->loop_father);
1277 add_bb_to_loop (*end_bb, before_bb->loop_father);
1278 }
1279 redirect_edge_pred (fallthru_edge, *end_bb);
1280 fallthru_edge->flags = EDGE_FALLTHRU;
1281 make_edge (before_bb, cond_bb, old_flags);
1282
1283 set_immediate_dominator (CDI_DOMINATORS, cond_bb, before_bb);
1284 set_immediate_dominator (CDI_DOMINATORS, code_bb, cond_bb);
1285
1286 gsi = gsi_last_bb (cond_bb);
1287
1288 /* t1 = status & A_{property}. */
1289 t1 = make_rename_temp (TREE_TYPE (status), NULL);
1290 t2 = build_int_cst (TREE_TYPE (status), trxn_prop);
1291 stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
1292 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1293
1294 /* if (t1). */
1295 t2 = build_int_cst (TREE_TYPE (status), 0);
1296 cond_stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
1297 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
1298
1299 emitf (entry_block, code_bb);
1300
1301 make_edge (cond_bb, code_bb, EDGE_TRUE_VALUE);
1302 make_edge (cond_bb, *end_bb, EDGE_FALSE_VALUE);
1303 make_edge (code_bb, *end_bb, EDGE_FALLTHRU);
1304
1305 return cond_bb;
1306 }
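
/* A sketch of the control flow created above for the save case (restores
   are analogous, testing A_RESTORELIVEVARIABLES; block names are
   hypothetical):

     before_bb:  status = _ITM_beginTransaction (...);
     cond_bb:    t1 = status & A_SAVELIVEVARIABLES;
                 if (t1 != 0) goto code_bb; else goto end_bb;
     code_bb:    tm_save_1 = addr1;  tm_save_2 = addr2;  ...
     end_bb:     <original destination of FALLTHRU_EDGE>
*/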
1307 \f
1308 static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
1309 struct walk_stmt_info *);
1310 static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
1311 struct walk_stmt_info *);
1312
1313 /* Evaluate an address X being dereferenced and determine if it
1314    originally points to a non-aliased new chunk of memory (malloc,
1315 alloca, etc).
1316
1317 Return MEM_THREAD_LOCAL if it points to a thread-local address.
1318 Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
1319 Return MEM_NON_LOCAL otherwise.
1320
1321 ENTRY_BLOCK is the entry block to the transaction containing the
1322 dereference of X. */
1323 static enum thread_memory_type
1324 thread_private_new_memory (basic_block entry_block, tree x)
1325 {
1326 gimple stmt = NULL;
1327 enum tree_code code;
1328 void **slot;
1329 tm_new_mem_map_t elt, *elt_p;
1330 tree val = x;
1331 enum thread_memory_type retval = mem_transaction_local;
1332
1333 if (!entry_block
1334 || TREE_CODE (x) != SSA_NAME
1335 /* Possible uninitialized use, or a function argument. In
1336 either case, we don't care. */
1337 || SSA_NAME_IS_DEFAULT_DEF (x))
1338 return mem_non_local;
1339
1340 /* Look in cache first. */
1341 elt.val = x;
1342 slot = htab_find_slot (tm_new_mem_hash, &elt, INSERT);
1343 elt_p = (tm_new_mem_map_t *) *slot;
1344 if (elt_p)
1345 return elt_p->local_new_memory;
1346
1347 /* Optimistically assume the memory is transaction local during
1348 processing. This catches recursion into this variable. */
1349 *slot = elt_p = XNEW (tm_new_mem_map_t);
1350 elt_p->val = val;
1351 elt_p->local_new_memory = mem_transaction_local;
1352
1353 /* Search DEF chain to find the original definition of this address. */
1354 do
1355 {
1356 if (ptr_deref_may_alias_global_p (x))
1357 {
1358 /* Address escapes. This is not thread-private. */
1359 retval = mem_non_local;
1360 goto new_memory_ret;
1361 }
1362
1363 stmt = SSA_NAME_DEF_STMT (x);
1364
1365 /* If the malloc call is outside the transaction, this is
1366 thread-local. */
1367 if (retval != mem_thread_local
1368 && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
1369 retval = mem_thread_local;
1370
1371 if (is_gimple_assign (stmt))
1372 {
1373 code = gimple_assign_rhs_code (stmt);
1374 /* x = foo ==> foo */
1375 if (code == SSA_NAME)
1376 x = gimple_assign_rhs1 (stmt);
1377 /* x = foo + n ==> foo */
1378 else if (code == POINTER_PLUS_EXPR)
1379 x = gimple_assign_rhs1 (stmt);
1380 /* x = (cast*) foo ==> foo */
1381 else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
1382 x = gimple_assign_rhs1 (stmt);
1383 else
1384 {
1385 retval = mem_non_local;
1386 goto new_memory_ret;
1387 }
1388 }
1389 else
1390 {
1391 if (gimple_code (stmt) == GIMPLE_PHI)
1392 {
1393 unsigned int i;
1394 enum thread_memory_type mem;
1395 tree phi_result = gimple_phi_result (stmt);
1396
1397 /* If any of the ancestors are non-local, we are sure to
1398 be non-local. Otherwise we can avoid doing anything
1399 and inherit what has already been generated. */
1400 retval = mem_max;
1401 for (i = 0; i < gimple_phi_num_args (stmt); ++i)
1402 {
1403 tree op = PHI_ARG_DEF (stmt, i);
1404
1405 /* Exclude self-assignment. */
1406 if (phi_result == op)
1407 continue;
1408
1409 mem = thread_private_new_memory (entry_block, op);
1410 if (mem == mem_non_local)
1411 {
1412 retval = mem;
1413 goto new_memory_ret;
1414 }
1415 retval = MIN (retval, mem);
1416 }
1417 goto new_memory_ret;
1418 }
1419 break;
1420 }
1421 }
1422 while (TREE_CODE (x) == SSA_NAME);
1423
1424 if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
1425 /* Thread-local or transaction-local. */
1426 ;
1427 else
1428 retval = mem_non_local;
1429
1430 new_memory_ret:
1431 elt_p->local_new_memory = retval;
1432 return retval;
1433 }
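
/* Two illustrative cases for the classification above, assuming the
   pointer involved does not escape to other threads (hypothetical user
   code):

     p = malloc (n);                      // allocated before the transaction
     __transaction_atomic { p[i] = 1; }   // ==> mem_thread_local: the store
                                          //     is only undo-logged

     __transaction_atomic {
       q = malloc (n);                    // allocated inside the transaction
       q[i] = 1;                          // ==> mem_transaction_local: no
     }                                    //     barrier or logging needed
*/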
1434
1435 /* Determine whether X has to be instrumented using a read
1436 or write barrier.
1437
1438    ENTRY_BLOCK is the entry block for the region in which STMT resides,
1439    or NULL if unknown.
1440
1441    STMT is the statement in which X occurs.  It is used for thread
1442 private memory instrumentation. If no TPM instrumentation is
1443 desired, STMT should be null. */
1444 static bool
1445 requires_barrier (basic_block entry_block, tree x, gimple stmt)
1446 {
1447 tree orig = x;
1448 while (handled_component_p (x))
1449 x = TREE_OPERAND (x, 0);
1450
1451 switch (TREE_CODE (x))
1452 {
1453 case INDIRECT_REF:
1454 case MEM_REF:
1455 {
1456 enum thread_memory_type ret;
1457
1458 ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
1459 if (ret == mem_non_local)
1460 return true;
1461 if (stmt && ret == mem_thread_local)
1462 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1463 tm_log_add (entry_block, orig, stmt);
1464
1465 /* Transaction-locals require nothing at all. For malloc, a
1466 transaction restart frees the memory and we reallocate.
1467 For alloca, the stack pointer gets reset by the retry and
1468 we reallocate. */
1469 return false;
1470 }
1471
1472 case TARGET_MEM_REF:
1473 if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
1474 return true;
1475 x = TREE_OPERAND (TMR_BASE (x), 0);
1476 if (TREE_CODE (x) == PARM_DECL)
1477 return false;
1478 gcc_assert (TREE_CODE (x) == VAR_DECL);
1479 /* FALLTHRU */
1480
1481 case PARM_DECL:
1482 case RESULT_DECL:
1483 case VAR_DECL:
1484 if (DECL_BY_REFERENCE (x))
1485 {
1486 /* ??? This value is a pointer, but aggregate_value_p has been
1487 jigged to return true which confuses needs_to_live_in_memory.
1488 This ought to be cleaned up generically.
1489
1490 FIXME: Verify this still happens after the next mainline
1491      merge.  Testcase is g++.dg/tm/pr47554.C.
1492 */
1493 return false;
1494 }
1495
1496 if (is_global_var (x))
1497 return !TREE_READONLY (x);
1498 if (/* FIXME: This condition should actually go below in the
1499 tm_log_add() call, however is_call_clobbered() depends on
1500 aliasing info which is not available during
1501 gimplification. Since requires_barrier() gets called
1502 during lower_sequence_tm/gimplification, leave the call
1503 to needs_to_live_in_memory until we eliminate
1504 lower_sequence_tm altogether. */
1505 needs_to_live_in_memory (x))
1506 return true;
1507 else
1508 {
1509 /* For local memory that doesn't escape (aka thread private
1510 memory), we can either save the value at the beginning of
1511 the transaction and restore on restart, or call a tm
1512 function to dynamically save and restore on restart
1513 (ITM_L*). */
1514 if (stmt)
1515 tm_log_add (entry_block, orig, stmt);
1516 return false;
1517 }
1518
1519 default:
1520 return false;
1521 }
1522 }
1523
1524 /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1525 a transaction region. */
1526
1527 static void
1528 examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
1529 {
1530 gimple stmt = gsi_stmt (*gsi);
1531
1532 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
1533 *state |= GTMA_HAVE_LOAD;
1534 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
1535 *state |= GTMA_HAVE_STORE;
1536 }
1537
1538 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1539
1540 static void
1541 examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
1542 {
1543 gimple stmt = gsi_stmt (*gsi);
1544 tree fn;
1545
1546 if (is_tm_pure_call (stmt))
1547 return;
1548
1549 /* Check if this call is a transaction abort. */
1550 fn = gimple_call_fndecl (stmt);
1551 if (is_tm_abort (fn))
1552 *state |= GTMA_HAVE_ABORT;
1553
1554 /* Note that something may happen. */
1555 *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
1556 }
1557
1558 /* Lower a GIMPLE_TRANSACTION statement. */
1559
1560 static void
1561 lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
1562 {
1563 gimple g, stmt = gsi_stmt (*gsi);
1564 unsigned int *outer_state = (unsigned int *) wi->info;
1565 unsigned int this_state = 0;
1566 struct walk_stmt_info this_wi;
1567
1568 /* First, lower the body. The scanning that we do inside gives
1569 us some idea of what we're dealing with. */
1570 memset (&this_wi, 0, sizeof (this_wi));
1571 this_wi.info = (void *) &this_state;
1572 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
1573 lower_sequence_tm, NULL, &this_wi);
1574
1575 /* If there was absolutely nothing transaction related inside the
1576 transaction, we may elide it. Likewise if this is a nested
1577 transaction and does not contain an abort. */
1578 if (this_state == 0
1579 || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
1580 {
1581 if (outer_state)
1582 *outer_state |= this_state;
1583
1584 gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
1585 GSI_SAME_STMT);
1586 gimple_transaction_set_body (stmt, NULL);
1587
1588 gsi_remove (gsi, true);
1589 wi->removed_stmt = true;
1590 return;
1591 }
1592
1593 /* Wrap the body of the transaction in a try-finally node so that
1594 the commit call is always properly called. */
1595 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
1596 if (flag_exceptions)
1597 {
1598 tree ptr;
1599 gimple_seq n_seq, e_seq;
1600
1601 n_seq = gimple_seq_alloc_with_stmt (g);
1602 e_seq = NULL;
1603
1604 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1605 1, integer_zero_node);
1606 ptr = create_tmp_var (ptr_type_node, NULL);
1607 gimple_call_set_lhs (g, ptr);
1608 gimple_seq_add_stmt (&e_seq, g);
1609
1610 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
1611 1, ptr);
1612 gimple_seq_add_stmt (&e_seq, g);
1613
1614 g = gimple_build_eh_else (n_seq, e_seq);
1615 }
1616
1617 g = gimple_build_try (gimple_transaction_body (stmt),
1618 gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
1619 gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
1620
1621 gimple_transaction_set_body (stmt, NULL);
1622
1623 /* If the transaction calls abort or if this is an outer transaction,
1624 add an "over" label afterwards. */
1625 if ((this_state & (GTMA_HAVE_ABORT))
1626 || (gimple_transaction_subcode(stmt) & GTMA_IS_OUTER))
1627 {
1628 tree label = create_artificial_label (UNKNOWN_LOCATION);
1629 gimple_transaction_set_label (stmt, label);
1630 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
1631 }
1632
1633 /* Record the set of operations found for use later. */
1634 this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
1635 gimple_transaction_set_subcode (stmt, this_state);
1636 }
1637
1638 /* Iterate through the statements in the sequence, lowering them all
1639 as appropriate for being in a transaction. */
1640
1641 static tree
1642 lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1643 struct walk_stmt_info *wi)
1644 {
1645 unsigned int *state = (unsigned int *) wi->info;
1646 gimple stmt = gsi_stmt (*gsi);
1647
1648 *handled_ops_p = true;
1649 switch (gimple_code (stmt))
1650 {
1651 case GIMPLE_ASSIGN:
1652 /* Only memory reads/writes need to be instrumented. */
1653 if (gimple_assign_single_p (stmt))
1654 examine_assign_tm (state, gsi);
1655 break;
1656
1657 case GIMPLE_CALL:
1658 examine_call_tm (state, gsi);
1659 break;
1660
1661 case GIMPLE_ASM:
1662 *state |= GTMA_MAY_ENTER_IRREVOCABLE;
1663 break;
1664
1665 case GIMPLE_TRANSACTION:
1666 lower_transaction (gsi, wi);
1667 break;
1668
1669 default:
1670 *handled_ops_p = !gimple_has_substatements (stmt);
1671 break;
1672 }
1673
1674 return NULL_TREE;
1675 }
1676
1677 /* Iterate through the statements in the sequence, lowering them all
1678 as appropriate for being outside of a transaction. */
1679
1680 static tree
1681 lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1682 struct walk_stmt_info * wi)
1683 {
1684 gimple stmt = gsi_stmt (*gsi);
1685
1686 if (gimple_code (stmt) == GIMPLE_TRANSACTION)
1687 {
1688 *handled_ops_p = true;
1689 lower_transaction (gsi, wi);
1690 }
1691 else
1692 *handled_ops_p = !gimple_has_substatements (stmt);
1693
1694 return NULL_TREE;
1695 }
1696
1697 /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1698 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1699 been moved out, and all the data required for constructing a proper
1700 CFG has been recorded. */
1701
1702 static unsigned int
1703 execute_lower_tm (void)
1704 {
1705 struct walk_stmt_info wi;
1706 gimple_seq body;
1707
1708 /* Transactional clones aren't created until a later pass. */
1709 gcc_assert (!decl_is_tm_clone (current_function_decl));
1710
1711 body = gimple_body (current_function_decl);
1712 memset (&wi, 0, sizeof (wi));
1713 walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
1714 gimple_set_body (current_function_decl, body);
1715
1716 return 0;
1717 }
1718
1719 struct gimple_opt_pass pass_lower_tm =
1720 {
1721 {
1722 GIMPLE_PASS,
1723 "tmlower", /* name */
1724 gate_tm, /* gate */
1725 execute_lower_tm, /* execute */
1726 NULL, /* sub */
1727 NULL, /* next */
1728 0, /* static_pass_number */
1729 TV_TRANS_MEM, /* tv_id */
1730 PROP_gimple_lcf, /* properties_required */
1731 0, /* properties_provided */
1732 0, /* properties_destroyed */
1733 0, /* todo_flags_start */
1734 0, /* todo_flags_finish */
1735 }
1736 };
1737 \f
1738 /* Collect region information for each transaction. */
1739
1740 struct tm_region
1741 {
1742 /* Link to the next unnested transaction. */
1743 struct tm_region *next;
1744
1745 /* Link to the next inner transaction. */
1746 struct tm_region *inner;
1747
1748 /* Link to the next outer transaction. */
1749 struct tm_region *outer;
1750
1751 /* The GIMPLE_TRANSACTION statement beginning this transaction. */
1752 gimple transaction_stmt;
1753
1754 /* The entry block to this region. */
1755 basic_block entry_block;
1756
1757 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1758 These blocks are still a part of the region (i.e., the border is
1759 inclusive). Note that this set is only complete for paths in the CFG
1760 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1761 the edge to the "over" label. */
1762 bitmap exit_blocks;
1763
1764   /* The set of all blocks that have a TM_IRREVOCABLE call.  */
1765 bitmap irr_blocks;
1766 };
1767
1768 typedef struct tm_region *tm_region_p;
1769 DEF_VEC_P (tm_region_p);
1770 DEF_VEC_ALLOC_P (tm_region_p, heap);
1771
1772 /* True if there are pending edge statements to be committed for the
1773 current function being scanned in the tmmark pass. */
1774 bool pending_edge_inserts_p;
1775
1776 static struct tm_region *all_tm_regions;
1777 static bitmap_obstack tm_obstack;
1778
1779
1780 /* A subroutine of tm_region_init. Record the existence of the
1781 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1782
1783 static struct tm_region *
1784 tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
1785 {
1786 struct tm_region *region;
1787
1788 region = (struct tm_region *)
1789 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1790
1791 if (outer)
1792 {
1793 region->next = outer->inner;
1794 outer->inner = region;
1795 }
1796 else
1797 {
1798 region->next = all_tm_regions;
1799 all_tm_regions = region;
1800 }
1801 region->inner = NULL;
1802 region->outer = outer;
1803
1804 region->transaction_stmt = stmt;
1805
1806 /* There are either one or two edges out of the block containing
1807 the GIMPLE_TRANSACTION, one to the actual region and one to the
1808 "over" label if the region contains an abort. The former will
1809 always be the one marked FALLTHRU. */
1810 region->entry_block = FALLTHRU_EDGE (bb)->dest;
1811
1812 region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
1813 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1814
1815 return region;
1816 }
1817
1818 /* A subroutine of tm_region_init. Record all the exit and
1819 irrevocable blocks in BB into the region's exit_blocks and
1820 irr_blocks bitmaps. Returns the new region being scanned. */
1821
1822 static struct tm_region *
1823 tm_region_init_1 (struct tm_region *region, basic_block bb)
1824 {
1825 gimple_stmt_iterator gsi;
1826 gimple g;
1827
1828 if (!region
1829 || (!region->irr_blocks && !region->exit_blocks))
1830 return region;
1831
1832 /* Check to see if this is the end of a region by seeing if it
1833 contains a call to __builtin_tm_commit{,_eh}. Note that the
1834 outermost region for DECL_IS_TM_CLONE need not collect this. */
1835 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1836 {
1837 g = gsi_stmt (gsi);
1838 if (gimple_code (g) == GIMPLE_CALL)
1839 {
1840 tree fn = gimple_call_fndecl (g);
1841 if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
1842 {
1843 if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
1844 || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
1845 && region->exit_blocks)
1846 {
1847 bitmap_set_bit (region->exit_blocks, bb->index);
1848 region = region->outer;
1849 break;
1850 }
1851 if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
1852 bitmap_set_bit (region->irr_blocks, bb->index);
1853 }
1854 }
1855 }
1856 return region;
1857 }
1858
1859 /* Collect all of the transaction regions within the current function
1860 and record them in ALL_TM_REGIONS. The REGION parameter may specify
1861 an "outermost" region for use by tm clones. */
1862
1863 static void
1864 tm_region_init (struct tm_region *region)
1865 {
1866 gimple g;
1867 edge_iterator ei;
1868 edge e;
1869 basic_block bb;
1870 VEC(basic_block, heap) *queue = NULL;
1871 bitmap visited_blocks = BITMAP_ALLOC (NULL);
1872 struct tm_region *old_region;
1873 VEC(tm_region_p, heap) *bb_regions = NULL;
1874
1875 all_tm_regions = region;
1876 bb = single_succ (ENTRY_BLOCK_PTR);
1877
1878 /* We could store this information in bb->aux, but we may get called
1879 through get_all_tm_blocks() from another pass that may be already
1880 using bb->aux. */
1881 VEC_safe_grow_cleared (tm_region_p, heap, bb_regions, last_basic_block);
1882
1883 VEC_safe_push (basic_block, heap, queue, bb);
1884 VEC_replace (tm_region_p, bb_regions, bb->index, region);
1885 do
1886 {
1887 bb = VEC_pop (basic_block, queue);
1888 region = VEC_index (tm_region_p, bb_regions, bb->index);
1889 VEC_replace (tm_region_p, bb_regions, bb->index, NULL);
1890
1891 /* Record exit and irrevocable blocks. */
1892 region = tm_region_init_1 (region, bb);
1893
1894 /* Check for the last statement in the block beginning a new region. */
1895 g = last_stmt (bb);
1896 old_region = region;
1897 if (g && gimple_code (g) == GIMPLE_TRANSACTION)
1898 region = tm_region_init_0 (region, bb, g);
1899
1900 /* Process subsequent blocks. */
1901 FOR_EACH_EDGE (e, ei, bb->succs)
1902 if (!bitmap_bit_p (visited_blocks, e->dest->index))
1903 {
1904 bitmap_set_bit (visited_blocks, e->dest->index);
1905 VEC_safe_push (basic_block, heap, queue, e->dest);
1906
1907 /* If the current block started a new region, make sure that only
1908 the entry block of the new region is associated with this region.
1909 Other successors are still part of the old region. */
1910 if (old_region != region && e->dest != region->entry_block)
1911 VEC_replace (tm_region_p, bb_regions, e->dest->index, old_region);
1912 else
1913 VEC_replace (tm_region_p, bb_regions, e->dest->index, region);
1914 }
1915 }
1916 while (!VEC_empty (basic_block, queue));
1917 VEC_free (basic_block, heap, queue);
1918 BITMAP_FREE (visited_blocks);
1919 VEC_free (tm_region_p, heap, bb_regions);
1920 }
1921
1922 /* The "gate" function for all transactional memory expansion and optimization
1923 passes. We collect region information for each top-level transaction, and
1924 if we don't find any, we skip all of the TM passes. Each region will have
1925 all of the exit blocks recorded, and the originating statement. */
1926
1927 static bool
1928 gate_tm_init (void)
1929 {
1930 if (!flag_tm)
1931 return false;
1932
1933 calculate_dominance_info (CDI_DOMINATORS);
1934 bitmap_obstack_initialize (&tm_obstack);
1935
1936 /* If the function is a TM_CLONE, then the entire function is the region. */
1937 if (decl_is_tm_clone (current_function_decl))
1938 {
1939 struct tm_region *region = (struct tm_region *)
1940 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1941 memset (region, 0, sizeof (*region));
1942 region->entry_block = single_succ (ENTRY_BLOCK_PTR);
1943 /* For a clone, the entire function is the region. But even if
1944 we don't need to record any exit blocks, we may need to
1945 record irrevocable blocks. */
1946 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1947
1948 tm_region_init (region);
1949 }
1950 else
1951 {
1952 tm_region_init (NULL);
1953
1954 /* If we didn't find any regions, cleanup and skip the whole tree
1955 of tm-related optimizations. */
1956 if (all_tm_regions == NULL)
1957 {
1958 bitmap_obstack_release (&tm_obstack);
1959 return false;
1960 }
1961 }
1962
1963 return true;
1964 }
1965
1966 struct gimple_opt_pass pass_tm_init =
1967 {
1968 {
1969 GIMPLE_PASS,
1970 "*tminit", /* name */
1971 gate_tm_init, /* gate */
1972 NULL, /* execute */
1973 NULL, /* sub */
1974 NULL, /* next */
1975 0, /* static_pass_number */
1976 TV_TRANS_MEM, /* tv_id */
1977 PROP_ssa | PROP_cfg, /* properties_required */
1978 0, /* properties_provided */
1979 0, /* properties_destroyed */
1980 0, /* todo_flags_start */
1981 0, /* todo_flags_finish */
1982 }
1983 };
1984 \f
1985 /* Add FLAGS to the GIMPLE_TRANSACTION subcode of the statement that
1986    heads the transaction REGION, if any. */
1987
1988 static inline void
1989 transaction_subcode_ior (struct tm_region *region, unsigned flags)
1990 {
1991 if (region && region->transaction_stmt)
1992 {
1993 flags |= gimple_transaction_subcode (region->transaction_stmt);
1994 gimple_transaction_set_subcode (region->transaction_stmt, flags);
1995 }
1996 }
1997
1998 /* Construct a memory load in a transactional context. Return the
1999 gimple statement performing the load, or NULL if there is no
2000 TM_LOAD builtin of the appropriate size to do the load.
2001
2002 LOC is the location to use for the new statement(s). */
2003
2004 static gimple
2005 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2006 {
2007 enum built_in_function code = END_BUILTINS;
2008 tree t, type = TREE_TYPE (rhs), decl;
2009 gimple gcall;
2010
2011 if (type == float_type_node)
2012 code = BUILT_IN_TM_LOAD_FLOAT;
2013 else if (type == double_type_node)
2014 code = BUILT_IN_TM_LOAD_DOUBLE;
2015 else if (type == long_double_type_node)
2016 code = BUILT_IN_TM_LOAD_LDOUBLE;
2017 else if (TYPE_SIZE_UNIT (type) != NULL
2018 && host_integerp (TYPE_SIZE_UNIT (type), 1))
2019 {
2020 switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
2021 {
2022 case 1:
2023 code = BUILT_IN_TM_LOAD_1;
2024 break;
2025 case 2:
2026 code = BUILT_IN_TM_LOAD_2;
2027 break;
2028 case 4:
2029 code = BUILT_IN_TM_LOAD_4;
2030 break;
2031 case 8:
2032 code = BUILT_IN_TM_LOAD_8;
2033 break;
2034 }
2035 }
2036
2037 if (code == END_BUILTINS)
2038 {
2039 decl = targetm.vectorize.builtin_tm_load (type);
2040 if (!decl)
2041 return NULL;
2042 }
2043 else
2044 decl = builtin_decl_explicit (code);
2045
2046 t = gimplify_addr (gsi, rhs);
2047 gcall = gimple_build_call (decl, 1, t);
2048 gimple_set_location (gcall, loc);
2049
2050 t = TREE_TYPE (TREE_TYPE (decl));
2051 if (useless_type_conversion_p (type, t))
2052 {
2053 gimple_call_set_lhs (gcall, lhs);
2054 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2055 }
2056 else
2057 {
2058 gimple g;
2059 tree temp;
2060
2061 temp = make_rename_temp (t, NULL);
2062 gimple_call_set_lhs (gcall, temp);
2063 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2064
2065 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2066 g = gimple_build_assign (lhs, t);
2067 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2068 }
2069
2070 return gcall;
2071 }
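
/* A hedged example of the load expansion above: for "x = g" inside a
   transaction, where G is a 4-byte global needing a barrier, the
   dispatch selects BUILT_IN_TM_LOAD_4 and the statement becomes
   roughly

	x = __builtin__ITM_RU4 (&g);

   (the user-visible builtin name is the one given in gtm-builtins.def;
   it is spelled out here only for illustration).  When the builtin's
   return type differs from the type of X, e.g. a 4-byte type that is
   not unsigned int, the call result goes into a temporary and is
   VIEW_CONVERT_EXPRed into X, as done in the else branch above.  */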
2072
2073
2074 /* Similarly for storing RHS into LHS in a transactional context. */
2075
2076 static gimple
2077 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2078 {
2079 enum built_in_function code = END_BUILTINS;
2080 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2081 gimple gcall;
2082
2083 if (type == float_type_node)
2084 code = BUILT_IN_TM_STORE_FLOAT;
2085 else if (type == double_type_node)
2086 code = BUILT_IN_TM_STORE_DOUBLE;
2087 else if (type == long_double_type_node)
2088 code = BUILT_IN_TM_STORE_LDOUBLE;
2089 else if (TYPE_SIZE_UNIT (type) != NULL
2090 && host_integerp (TYPE_SIZE_UNIT (type), 1))
2091 {
2092 switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
2093 {
2094 case 1:
2095 code = BUILT_IN_TM_STORE_1;
2096 break;
2097 case 2:
2098 code = BUILT_IN_TM_STORE_2;
2099 break;
2100 case 4:
2101 code = BUILT_IN_TM_STORE_4;
2102 break;
2103 case 8:
2104 code = BUILT_IN_TM_STORE_8;
2105 break;
2106 }
2107 }
2108
2109 if (code == END_BUILTINS)
2110 {
2111 fn = targetm.vectorize.builtin_tm_store (type);
2112 if (!fn)
2113 return NULL;
2114 }
2115 else
2116 fn = builtin_decl_explicit (code);
2117
2118 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2119
2120 if (TREE_CODE (rhs) == CONSTRUCTOR)
2121 {
2122 /* Handle the easy initialization to zero. */
2123 if (CONSTRUCTOR_ELTS (rhs) == 0)
2124 rhs = build_int_cst (simple_type, 0);
2125 else
2126 {
2127 /* ...otherwise punt to the caller and probably use
2128 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2129 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2130 valid gimple. */
2131 return NULL;
2132 }
2133 }
2134 else if (!useless_type_conversion_p (simple_type, type))
2135 {
2136 gimple g;
2137 tree temp;
2138
2139 temp = make_rename_temp (simple_type, NULL);
2140 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2141 g = gimple_build_assign (temp, t);
2142 gimple_set_location (g, loc);
2143 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2144
2145 rhs = temp;
2146 }
2147
2148 t = gimplify_addr (gsi, lhs);
2149 gcall = gimple_build_call (fn, 2, t, rhs);
2150 gimple_set_location (gcall, loc);
2151 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2152
2153 return gcall;
2154 }
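
/* Similarly, a hedged sketch of the store side: "g = x" with a 4-byte
   G becomes roughly

	__builtin__ITM_WU4 (&g, x);

   again using the external name from gtm-builtins.def purely for
   illustration.  An empty CONSTRUCTOR on the right-hand side (a "= {}"
   zero initialization) is rewritten into a store of the constant 0 of
   the builtin's argument type, while a non-empty CONSTRUCTOR makes the
   function return NULL so that the caller falls back to the memmove
   path.  */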
2155
2156
2157 /* Expand an assignment statement into transactional builtins. */
2158
2159 static void
2160 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2161 {
2162 gimple stmt = gsi_stmt (*gsi);
2163 location_t loc = gimple_location (stmt);
2164 tree lhs = gimple_assign_lhs (stmt);
2165 tree rhs = gimple_assign_rhs1 (stmt);
2166 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2167 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2168 gimple gcall = NULL;
2169
2170 if (!load_p && !store_p)
2171 {
2172 /* Add thread private addresses to log if applicable. */
2173 requires_barrier (region->entry_block, lhs, stmt);
2174 gsi_next (gsi);
2175 return;
2176 }
2177
2178 gsi_remove (gsi, true);
2179
2180 if (load_p && !store_p)
2181 {
2182 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2183 gcall = build_tm_load (loc, lhs, rhs, gsi);
2184 }
2185 else if (store_p && !load_p)
2186 {
2187 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2188 gcall = build_tm_store (loc, lhs, rhs, gsi);
2189 }
2190 if (!gcall)
2191 {
2192 tree lhs_addr, rhs_addr, tmp;
2193
2194 if (load_p)
2195 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2196 if (store_p)
2197 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2198
2199 /* ??? Figure out if there's any possible overlap between the LHS
2200 and the RHS and if not, use MEMCPY. */
2201
2202 if (load_p && is_gimple_reg (lhs))
2203 {
2204 tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2205 lhs_addr = build_fold_addr_expr (tmp);
2206 }
2207 else
2208 {
2209 tmp = NULL_TREE;
2210 lhs_addr = gimplify_addr (gsi, lhs);
2211 }
2212 rhs_addr = gimplify_addr (gsi, rhs);
2213 gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
2214 3, lhs_addr, rhs_addr,
2215 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2216 gimple_set_location (gcall, loc);
2217 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2218
2219 if (tmp)
2220 {
2221 gcall = gimple_build_assign (lhs, tmp);
2222 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2223 }
2224 }
2225
2226 /* Now that we have the load/store in its instrumented form, add
2227 thread private addresses to the log if applicable. */
2228 if (!store_p)
2229 requires_barrier (region->entry_block, lhs, gcall);
2230
2231 /* add_stmt_to_tm_region (region, gcall); */
2232 }
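
/* For example (a sketch under the assumption that no TM_LOAD/TM_STORE
   builtin matches the access size), an aggregate copy "d = s" where
   both sides need barriers is expanded into a call to
   BUILT_IN_TM_MEMMOVE, conceptually

	__builtin__ITM_memmoveRtWt (&d, &s, sizeof (d));

   (the runtime entry point is whatever name gtm-builtins.def assigns
   to BUILT_IN_TM_MEMMOVE; it is shown only as an example).  When the
   destination is a gimple register, a temporary provides the address
   for the memmove and is then copied into the register with a plain
   assignment, as the code above does.  */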
2233
2234
2235 /* Expand a call statement as appropriate for a transaction. That is,
2236 either verify that the call does not affect the transaction, or
2237 redirect the call to a clone that handles transactions, or change
2238 the transaction state to IRREVOCABLE. Return true if the call is
2239 one of the builtins that end a transaction. */
2240
2241 static bool
2242 expand_call_tm (struct tm_region *region,
2243 gimple_stmt_iterator *gsi)
2244 {
2245 gimple stmt = gsi_stmt (*gsi);
2246 tree lhs = gimple_call_lhs (stmt);
2247 tree fn_decl;
2248 struct cgraph_node *node;
2249 bool retval = false;
2250
2251 fn_decl = gimple_call_fndecl (stmt);
2252
2253 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2254 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2255 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2256 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2257 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2258
2259 if (is_tm_pure_call (stmt))
2260 return false;
2261
2262 if (fn_decl)
2263 retval = is_tm_ending_fndecl (fn_decl);
2264 if (!retval)
2265 {
2266 /* Assume all non-const/pure calls write to memory, except
2267 transaction ending builtins. */
2268 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2269 }
2270
2271 /* For indirect calls, we already generated a call into the runtime. */
2272 if (!fn_decl)
2273 {
2274 tree fn = gimple_call_fn (stmt);
2275
2276 /* We are guaranteed never to go irrevocable on a safe or pure
2277 call, and the pure call was handled above. */
2278 if (is_tm_safe (fn))
2279 return false;
2280 else
2281 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2282
2283 return false;
2284 }
2285
2286 node = cgraph_get_node (fn_decl);
2287 /* All calls should have cgraph here. */
2288 gcc_assert (node);
2289 if (node->local.tm_may_enter_irr)
2290 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2291
2292 if (is_tm_abort (fn_decl))
2293 {
2294 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2295 return true;
2296 }
2297
2298 /* Instrument the store if needed.
2299
2300 If the assignment happens inside the function call (return slot
2301 optimization), there is no instrumentation to be done, since
2302 the callee should have done the right thing. */
2303 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2304 && !gimple_call_return_slot_opt_p (stmt))
2305 {
2306 tree tmp = make_rename_temp (TREE_TYPE (lhs), NULL);
2307 location_t loc = gimple_location (stmt);
2308 edge fallthru_edge = NULL;
2309
2310 /* Remember if the call was going to throw. */
2311 if (stmt_can_throw_internal (stmt))
2312 {
2313 edge_iterator ei;
2314 edge e;
2315 basic_block bb = gimple_bb (stmt);
2316
2317 FOR_EACH_EDGE (e, ei, bb->succs)
2318 if (e->flags & EDGE_FALLTHRU)
2319 {
2320 fallthru_edge = e;
2321 break;
2322 }
2323 }
2324
2325 gimple_call_set_lhs (stmt, tmp);
2326 update_stmt (stmt);
2327 stmt = gimple_build_assign (lhs, tmp);
2328 gimple_set_location (stmt, loc);
2329
2330 /* We cannot throw in the middle of a BB. If the call was going
2331 to throw, place the instrumentation on the fallthru edge, so
2332 the call remains the last statement in the block. */
2333 if (fallthru_edge)
2334 {
2335 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
2336 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2337 expand_assign_tm (region, &fallthru_gsi);
2338 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2339 pending_edge_inserts_p = true;
2340 }
2341 else
2342 {
2343 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
2344 expand_assign_tm (region, gsi);
2345 }
2346
2347 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2348 }
2349
2350 return retval;
2351 }
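
/* A hedged example of the LHS instrumentation above: given
   "g = foo ()" where G needs a store barrier and the return slot
   optimization does not apply, the call is rewritten to store into a
   fresh temporary,

	tmp = foo ();
	g = tmp;

   and the "g = tmp" assignment is then itself expanded by
   expand_assign_tm, typically into a TM_STORE builtin.  If the call
   could throw internally, that assignment is placed on the fall-thru
   edge instead, so the call stays the last statement of its block.  */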
2352
2353
2354 /* Expand all statements in BB as appropriate for being inside
2355 a transaction. */
2356
2357 static void
2358 expand_block_tm (struct tm_region *region, basic_block bb)
2359 {
2360 gimple_stmt_iterator gsi;
2361
2362 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2363 {
2364 gimple stmt = gsi_stmt (gsi);
2365 switch (gimple_code (stmt))
2366 {
2367 case GIMPLE_ASSIGN:
2368 /* Only memory reads/writes need to be instrumented. */
2369 if (gimple_assign_single_p (stmt)
2370 && !gimple_clobber_p (stmt))
2371 {
2372 expand_assign_tm (region, &gsi);
2373 continue;
2374 }
2375 break;
2376
2377 case GIMPLE_CALL:
2378 if (expand_call_tm (region, &gsi))
2379 return;
2380 break;
2381
2382 case GIMPLE_ASM:
2383 gcc_unreachable ();
2384
2385 default:
2386 break;
2387 }
2388 if (!gsi_end_p (gsi))
2389 gsi_next (&gsi);
2390 }
2391 }
2392
2393 /* Return the list of basic blocks in the region headed by ENTRY_BLOCK and
2394    bounded by EXIT_BLOCKS and IRR_BLOCKS.  If ALL_REGION_BLOCKS is non-null,
2395    also record every visited block in it.  STOP_AT_IRREVOCABLE_P is true if
2396    the caller is not interested in blocks following a TM_IRREVOCABLE call. */
2397
2398 static VEC (basic_block, heap) *
2399 get_tm_region_blocks (basic_block entry_block,
2400 bitmap exit_blocks,
2401 bitmap irr_blocks,
2402 bitmap all_region_blocks,
2403 bool stop_at_irrevocable_p)
2404 {
2405 VEC(basic_block, heap) *bbs = NULL;
2406 unsigned i;
2407 edge e;
2408 edge_iterator ei;
2409 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2410
2411 i = 0;
2412 VEC_safe_push (basic_block, heap, bbs, entry_block);
2413 bitmap_set_bit (visited_blocks, entry_block->index);
2414
2415 do
2416 {
2417 basic_block bb = VEC_index (basic_block, bbs, i++);
2418
2419       if (exit_blocks
2420 	  && bitmap_bit_p (exit_blocks, bb->index))
2421 continue;
2422
2423 if (stop_at_irrevocable_p
2424 && irr_blocks
2425 && bitmap_bit_p (irr_blocks, bb->index))
2426 continue;
2427
2428 FOR_EACH_EDGE (e, ei, bb->succs)
2429 if (!bitmap_bit_p (visited_blocks, e->dest->index))
2430 {
2431 bitmap_set_bit (visited_blocks, e->dest->index);
2432 VEC_safe_push (basic_block, heap, bbs, e->dest);
2433 }
2434 }
2435 while (i < VEC_length (basic_block, bbs));
2436
2437 if (all_region_blocks)
2438 bitmap_ior_into (all_region_blocks, visited_blocks);
2439
2440 BITMAP_FREE (visited_blocks);
2441 return bbs;
2442 }
2443
2444 /* Set the BB_IN_TRANSACTION flag on every basic block that belongs to a
2445    transaction region. */
2446
2447 void
2448 compute_transaction_bits (void)
2449 {
2450 struct tm_region *region;
2451 VEC (basic_block, heap) *queue;
2452 unsigned int i;
2453 basic_block bb;
2454
2455   /* ??? Perhaps we need to abstract gate_tm_init further, because we
2456      certainly don't need it to calculate CDI_DOMINATORS info. */
2457 gate_tm_init ();
2458
2459 FOR_EACH_BB (bb)
2460 bb->flags &= ~BB_IN_TRANSACTION;
2461
2462 for (region = all_tm_regions; region; region = region->next)
2463 {
2464 queue = get_tm_region_blocks (region->entry_block,
2465 region->exit_blocks,
2466 region->irr_blocks,
2467 NULL,
2468 /*stop_at_irr_p=*/true);
2469 for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
2470 bb->flags |= BB_IN_TRANSACTION;
2471 VEC_free (basic_block, heap, queue);
2472 }
2473
2474 if (all_tm_regions)
2475 bitmap_obstack_release (&tm_obstack);
2476 }
2477
2478 /* Entry point to the MARK phase of TM expansion. Here we replace
2479 transactional memory statements with calls to builtins, and function
2480 calls with their transactional clones (if available). But we don't
2481 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
2482
2483 static unsigned int
2484 execute_tm_mark (void)
2485 {
2486 struct tm_region *region;
2487 basic_block bb;
2488 VEC (basic_block, heap) *queue;
2489 size_t i;
2490
2491 queue = VEC_alloc (basic_block, heap, 10);
2492 pending_edge_inserts_p = false;
2493
2494 for (region = all_tm_regions; region ; region = region->next)
2495 {
2496 tm_log_init ();
2497 /* If we have a transaction... */
2498 if (region->exit_blocks)
2499 {
2500 unsigned int subcode
2501 = gimple_transaction_subcode (region->transaction_stmt);
2502
2503 /* Collect a new SUBCODE set, now that optimizations are done... */
2504 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2505 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
2506 | GTMA_MAY_ENTER_IRREVOCABLE);
2507 else
2508 subcode &= GTMA_DECLARATION_MASK;
2509 gimple_transaction_set_subcode (region->transaction_stmt, subcode);
2510 }
2511
2512 queue = get_tm_region_blocks (region->entry_block,
2513 region->exit_blocks,
2514 region->irr_blocks,
2515 NULL,
2516 /*stop_at_irr_p=*/true);
2517 for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
2518 expand_block_tm (region, bb);
2519 VEC_free (basic_block, heap, queue);
2520
2521 tm_log_emit ();
2522 }
2523
2524 if (pending_edge_inserts_p)
2525 gsi_commit_edge_inserts ();
2526 return 0;
2527 }
2528
2529 struct gimple_opt_pass pass_tm_mark =
2530 {
2531 {
2532 GIMPLE_PASS,
2533 "tmmark", /* name */
2534 NULL, /* gate */
2535 execute_tm_mark, /* execute */
2536 NULL, /* sub */
2537 NULL, /* next */
2538 0, /* static_pass_number */
2539 TV_TRANS_MEM, /* tv_id */
2540 PROP_ssa | PROP_cfg, /* properties_required */
2541 0, /* properties_provided */
2542 0, /* properties_destroyed */
2543 0, /* todo_flags_start */
2544 TODO_update_ssa
2545 | TODO_verify_ssa, /* todo_flags_finish */
2546 }
2547 };
2548 \f
2549 /* Create an abnormal call edge from BB to the first block of the
2550    transaction region REGION. Also record the edge in the TM_RESTART map. */
2551
2552 static inline void
2553 make_tm_edge (gimple stmt, basic_block bb, struct tm_region *region)
2554 {
2555 void **slot;
2556 struct tm_restart_node *n, dummy;
2557
2558 if (cfun->gimple_df->tm_restart == NULL)
2559 cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
2560 struct_ptr_eq, ggc_free);
2561
2562 dummy.stmt = stmt;
2563 dummy.label_or_list = gimple_block_label (region->entry_block);
2564 slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
2565 n = (struct tm_restart_node *) *slot;
2566 if (n == NULL)
2567 {
2568 n = ggc_alloc_tm_restart_node ();
2569 *n = dummy;
2570 }
2571 else
2572 {
2573 tree old = n->label_or_list;
2574 if (TREE_CODE (old) == LABEL_DECL)
2575 old = tree_cons (NULL, old, NULL);
2576 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
2577 }
2578
2579 make_edge (bb, region->entry_block, EDGE_ABNORMAL);
2580 }
2581
2582
2583 /* Split block BB as necessary for every builtin function we added, and
2584 wire up the abnormal back edges implied by the transaction restart. */
2585
2586 static void
2587 expand_block_edges (struct tm_region *region, basic_block bb)
2588 {
2589 gimple_stmt_iterator gsi;
2590
2591 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2592 {
2593 bool do_next = true;
2594 gimple stmt = gsi_stmt (gsi);
2595
2596 /* ??? TM_COMMIT (and any other tm builtin function) in a nested
2597 transaction has an abnormal edge back to the outer-most transaction
2598 (there are no nested retries), while a TM_ABORT also has an abnormal
2599 backedge to the inner-most transaction. We haven't actually saved
2600 the inner-most transaction here. We should be able to get to it
2601 via the region_nr saved on STMT, and read the transaction_stmt from
2602 that, and find the first region block from there. */
2603 /* ??? Shouldn't we split for any non-pure, non-irrevocable function? */
2604 if (gimple_code (stmt) == GIMPLE_CALL
2605 && (gimple_call_flags (stmt) & ECF_TM_BUILTIN) != 0)
2606 {
2607 if (gsi_one_before_end_p (gsi))
2608 make_tm_edge (stmt, bb, region);
2609 else
2610 {
2611 edge e = split_block (bb, stmt);
2612 make_tm_edge (stmt, bb, region);
2613 bb = e->dest;
2614 gsi = gsi_start_bb (bb);
2615 do_next = false;
2616 }
2617
2618 /* Delete any tail-call annotation that may have been added.
2619 The tail-call pass may have mis-identified the commit as being
2620 a candidate because we had not yet added this restart edge. */
2621 gimple_call_set_tail (stmt, false);
2622 }
2623
2624 if (do_next)
2625 gsi_next (&gsi);
2626 }
2627 }
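
/* A sketch of the effect of the splitting above: a block such as

	__builtin__ITM_WU4 (&x, 1);
	t = y + 1;

   is split right after the TM builtin call, and an abnormal edge is
   added from the call's block back to the region's entry block, so
   that a runtime-initiated restart of the transaction is represented
   in the CFG (and recorded in the TM_RESTART map for later use).  */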
2628
2629 /* Expand the GIMPLE_TRANSACTION statement into the STM library call. */
2630
2631 static void
2632 expand_transaction (struct tm_region *region)
2633 {
2634 tree status, tm_start;
2635 basic_block atomic_bb, slice_bb;
2636 gimple_stmt_iterator gsi;
2637 tree t1, t2;
2638 gimple g;
2639 int flags, subcode;
2640
2641 tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2642 status = make_rename_temp (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
2643
2644 /* ??? There are plenty of bits here we're not computing. */
2645 subcode = gimple_transaction_subcode (region->transaction_stmt);
2646 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2647 flags = PR_DOESGOIRREVOCABLE | PR_UNINSTRUMENTEDCODE;
2648 else
2649 flags = PR_INSTRUMENTEDCODE;
2650 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2651 flags |= PR_HASNOIRREVOCABLE;
2652 /* If the transaction does not have an abort in lexical scope and is not
2653 marked as an outer transaction, then it will never abort. */
2654 if ((subcode & GTMA_HAVE_ABORT) == 0
2655 && (subcode & GTMA_IS_OUTER) == 0)
2656 flags |= PR_HASNOABORT;
2657 if ((subcode & GTMA_HAVE_STORE) == 0)
2658 flags |= PR_READONLY;
2659 t2 = build_int_cst (TREE_TYPE (status), flags);
2660 g = gimple_build_call (tm_start, 1, t2);
2661 gimple_call_set_lhs (g, status);
2662 gimple_set_location (g, gimple_location (region->transaction_stmt));
2663
2664 atomic_bb = gimple_bb (region->transaction_stmt);
2665
2666 if (!VEC_empty (tree, tm_log_save_addresses))
2667 tm_log_emit_saves (region->entry_block, atomic_bb);
2668
2669 gsi = gsi_last_bb (atomic_bb);
2670 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2671 gsi_remove (&gsi, true);
2672
2673 if (!VEC_empty (tree, tm_log_save_addresses))
2674 region->entry_block =
2675 tm_log_emit_save_or_restores (region->entry_block,
2676 A_RESTORELIVEVARIABLES,
2677 status,
2678 tm_log_emit_restores,
2679 atomic_bb,
2680 FALLTHRU_EDGE (atomic_bb),
2681 &slice_bb);
2682 else
2683 slice_bb = atomic_bb;
2684
2685 /* If we have an ABORT statement, create a test following the start
2686 call to perform the abort. */
2687 if (gimple_transaction_label (region->transaction_stmt))
2688 {
2689 edge e;
2690 basic_block test_bb;
2691
2692 test_bb = create_empty_bb (slice_bb);
2693 if (current_loops && slice_bb->loop_father)
2694 add_bb_to_loop (test_bb, slice_bb->loop_father);
2695 if (VEC_empty (tree, tm_log_save_addresses))
2696 region->entry_block = test_bb;
2697 gsi = gsi_last_bb (test_bb);
2698
2699 t1 = make_rename_temp (TREE_TYPE (status), NULL);
2700 t2 = build_int_cst (TREE_TYPE (status), A_ABORTTRANSACTION);
2701 g = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
2702 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
2703
2704 t2 = build_int_cst (TREE_TYPE (status), 0);
2705 g = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2706 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
2707
2708 e = FALLTHRU_EDGE (slice_bb);
2709 redirect_edge_pred (e, test_bb);
2710 e->flags = EDGE_FALSE_VALUE;
2711 e->probability = PROB_ALWAYS - PROB_VERY_UNLIKELY;
2712
2713 e = BRANCH_EDGE (atomic_bb);
2714 redirect_edge_pred (e, test_bb);
2715 e->flags = EDGE_TRUE_VALUE;
2716 e->probability = PROB_VERY_UNLIKELY;
2717
2718 e = make_edge (slice_bb, test_bb, EDGE_FALLTHRU);
2719 }
2720
2721 /* If we have no abort, but we do have PHIs at the beginning of the atomic
2722    region, that means there is a loop at the beginning of the atomic region
2723 that shares the first block. This can cause problems with the abnormal
2724 edges we're about to add for the transaction restart. Solve this by
2725 adding a new empty block to receive the abnormal edges. */
2726 else if (phi_nodes (region->entry_block))
2727 {
2728 edge e;
2729 basic_block empty_bb;
2730
2731 region->entry_block = empty_bb = create_empty_bb (atomic_bb);
2732 if (current_loops && atomic_bb->loop_father)
2733 add_bb_to_loop (empty_bb, atomic_bb->loop_father);
2734
2735 e = FALLTHRU_EDGE (atomic_bb);
2736 redirect_edge_pred (e, empty_bb);
2737
2738 e = make_edge (atomic_bb, empty_bb, EDGE_FALLTHRU);
2739 }
2740
2741 /* The GIMPLE_TRANSACTION statement no longer exists. */
2742 region->transaction_stmt = NULL;
2743 }
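
/* A rough sketch of the shape produced above (details depend on the
   subcode bits and on whether live-in variables had to be saved): a
   transaction with an abort is turned into approximately

	tm_state = __builtin__ITM_beginTransaction (flags);
	if (tm_state & A_ABORTTRANSACTION)
	  goto <over/abort label>;
	<instrumented transaction body>

   where FLAGS carries PR_INSTRUMENTEDCODE, PR_HASNOABORT, PR_READONLY
   and friends as computed above.  The begin function's name is the one
   gtm-builtins.def gives to BUILT_IN_TM_START and is shown only for
   illustration.  */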
2744
2745 static void expand_regions (struct tm_region *);
2746
2747 /* Helper function for expand_regions. Expand REGION and recurse to
2748 the inner region. */
2749
2750 static void
2751 expand_regions_1 (struct tm_region *region)
2752 {
2753 if (region->exit_blocks)
2754 {
2755 unsigned int i;
2756 basic_block bb;
2757 VEC (basic_block, heap) *queue;
2758
2759 /* Collect the set of blocks in this region. Do this before
2760 splitting edges, so that we don't have to play with the
2761 dominator tree in the middle. */
2762 queue = get_tm_region_blocks (region->entry_block,
2763 region->exit_blocks,
2764 region->irr_blocks,
2765 NULL,
2766 /*stop_at_irr_p=*/false);
2767 expand_transaction (region);
2768 for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
2769 expand_block_edges (region, bb);
2770 VEC_free (basic_block, heap, queue);
2771 }
2772 if (region->inner)
2773 expand_regions (region->inner);
2774 }
2775
2776 /* Expand regions starting at REGION. */
2777
2778 static void
2779 expand_regions (struct tm_region *region)
2780 {
2781 while (region)
2782 {
2783 expand_regions_1 (region);
2784 region = region->next;
2785 }
2786 }
2787
2788 /* Entry point to the final expansion of transactional nodes. */
2789
2790 static unsigned int
2791 execute_tm_edges (void)
2792 {
2793 expand_regions (all_tm_regions);
2794 tm_log_delete ();
2795
2796 /* We've got to release the dominance info now, to indicate that it
2797 must be rebuilt completely. Otherwise we'll crash trying to update
2798 the SSA web in the TODO section following this pass. */
2799 free_dominance_info (CDI_DOMINATORS);
2800 bitmap_obstack_release (&tm_obstack);
2801 all_tm_regions = NULL;
2802
2803 return 0;
2804 }
2805
2806 struct gimple_opt_pass pass_tm_edges =
2807 {
2808 {
2809 GIMPLE_PASS,
2810 "tmedge", /* name */
2811 NULL, /* gate */
2812 execute_tm_edges, /* execute */
2813 NULL, /* sub */
2814 NULL, /* next */
2815 0, /* static_pass_number */
2816 TV_TRANS_MEM, /* tv_id */
2817 PROP_ssa | PROP_cfg, /* properties_required */
2818 0, /* properties_provided */
2819 0, /* properties_destroyed */
2820 0, /* todo_flags_start */
2821 TODO_update_ssa
2822 | TODO_verify_ssa, /* todo_flags_finish */
2823 }
2824 };
2825 \f
2826 /* A unique TM memory operation. */
2827 typedef struct tm_memop
2828 {
2829 /* Unique ID that all memory operations to the same location have. */
2830 unsigned int value_id;
2831 /* Address of load/store. */
2832 tree addr;
2833 } *tm_memop_t;
2834
2835 /* Sets for solving data flow equations in the memory optimization pass. */
2836 struct tm_memopt_bitmaps
2837 {
2838 /* Stores available to this BB upon entry. Basically, stores that
2839 dominate this BB. */
2840 bitmap store_avail_in;
2841 /* Stores available at the end of this BB. */
2842 bitmap store_avail_out;
2843 bitmap store_antic_in;
2844 bitmap store_antic_out;
2845 /* Reads available to this BB upon entry. Basically, reads that
2846 dominate this BB. */
2847 bitmap read_avail_in;
2848 /* Reads available at the end of this BB. */
2849 bitmap read_avail_out;
2850 /* Reads performed in this BB. */
2851 bitmap read_local;
2852 /* Writes performed in this BB. */
2853 bitmap store_local;
2854
2855 /* Temporary storage for pass. */
2856 /* Is the current BB in the worklist? */
2857 bool avail_in_worklist_p;
2858 /* Have we visited this BB? */
2859 bool visited_p;
2860 };
2861
2862 static bitmap_obstack tm_memopt_obstack;
2863
2864 /* Unique counter for TM loads and stores. Loads and stores of the
2865 same address get the same ID. */
2866 static unsigned int tm_memopt_value_id;
2867 static htab_t tm_memopt_value_numbers;
2868
2869 #define STORE_AVAIL_IN(BB) \
2870 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
2871 #define STORE_AVAIL_OUT(BB) \
2872 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
2873 #define STORE_ANTIC_IN(BB) \
2874 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
2875 #define STORE_ANTIC_OUT(BB) \
2876 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
2877 #define READ_AVAIL_IN(BB) \
2878 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
2879 #define READ_AVAIL_OUT(BB) \
2880 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
2881 #define READ_LOCAL(BB) \
2882 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
2883 #define STORE_LOCAL(BB) \
2884 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
2885 #define AVAIL_IN_WORKLIST_P(BB) \
2886 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
2887 #define BB_VISITED_P(BB) \
2888 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
2889
2890 /* Htab support. Return a hash value for a `tm_memop'. */
2891 static hashval_t
2892 tm_memop_hash (const void *p)
2893 {
2894 const struct tm_memop *mem = (const struct tm_memop *) p;
2895 tree addr = mem->addr;
2896 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
2897 actually done with operand_equal_p (see tm_memop_eq). */
2898 if (TREE_CODE (addr) == ADDR_EXPR)
2899 addr = TREE_OPERAND (addr, 0);
2900 return iterative_hash_expr (addr, 0);
2901 }
2902
2903 /* Htab support. Return true if two tm_memop's are the same. */
2904 static int
2905 tm_memop_eq (const void *p1, const void *p2)
2906 {
2907 const struct tm_memop *mem1 = (const struct tm_memop *) p1;
2908 const struct tm_memop *mem2 = (const struct tm_memop *) p2;
2909
2910 return operand_equal_p (mem1->addr, mem2->addr, 0);
2911 }
2912
2913 /* Given a TM load/store in STMT, return the value number for the address
2914 it accesses. */
2915
2916 static unsigned int
2917 tm_memopt_value_number (gimple stmt, enum insert_option op)
2918 {
2919 struct tm_memop tmpmem, *mem;
2920 void **slot;
2921
2922 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
2923 tmpmem.addr = gimple_call_arg (stmt, 0);
2924 slot = htab_find_slot (tm_memopt_value_numbers, &tmpmem, op);
2925 if (*slot)
2926 mem = (struct tm_memop *) *slot;
2927 else if (op == INSERT)
2928 {
2929 mem = XNEW (struct tm_memop);
2930 *slot = mem;
2931 mem->value_id = tm_memopt_value_id++;
2932 mem->addr = tmpmem.addr;
2933 }
2934 else
2935 gcc_unreachable ();
2936 return mem->value_id;
2937 }
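
/* For instance (illustration only), in

	__builtin__ITM_WU4 (&x, 1);
	tmp = __builtin__ITM_RU4 (&x);

   both calls present the address &x as their first argument, hash to
   the same bucket and compare equal via operand_equal_p, so the store
   and the load receive the same value number; that is what lets the
   dataflow below reason about "the same location".  */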
2938
2939 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
2940
2941 static void
2942 tm_memopt_accumulate_memops (basic_block bb)
2943 {
2944 gimple_stmt_iterator gsi;
2945
2946 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2947 {
2948 gimple stmt = gsi_stmt (gsi);
2949 bitmap bits;
2950 unsigned int loc;
2951
2952 if (is_tm_store (stmt))
2953 bits = STORE_LOCAL (bb);
2954 else if (is_tm_load (stmt))
2955 bits = READ_LOCAL (bb);
2956 else
2957 continue;
2958
2959 loc = tm_memopt_value_number (stmt, INSERT);
2960 bitmap_set_bit (bits, loc);
2961 if (dump_file)
2962 {
2963 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
2964 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
2965 gimple_bb (stmt)->index);
2966 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
2967 fprintf (dump_file, "\n");
2968 }
2969 }
2970 }
2971
2972 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
2973
2974 static void
2975 dump_tm_memopt_set (const char *set_name, bitmap bits)
2976 {
2977 unsigned i;
2978 bitmap_iterator bi;
2979 const char *comma = "";
2980
2981 fprintf (dump_file, "TM memopt: %s: [", set_name);
2982 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
2983 {
2984 htab_iterator hi;
2985 struct tm_memop *mem;
2986
2987 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
2988 FOR_EACH_HTAB_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
2989 if (mem->value_id == i)
2990 break;
2991 gcc_assert (mem->value_id == i);
2992 fprintf (dump_file, "%s", comma);
2993 comma = ", ";
2994 print_generic_expr (dump_file, mem->addr, 0);
2995 }
2996 fprintf (dump_file, "]\n");
2997 }
2998
2999 /* Prettily dump all of the memopt sets in BLOCKS. */
3000
3001 static void
3002 dump_tm_memopt_sets (VEC (basic_block, heap) *blocks)
3003 {
3004 size_t i;
3005 basic_block bb;
3006
3007 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3008 {
3009 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3010 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3011 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3012 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3013 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3014 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3015 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3016 }
3017 }
3018
3019 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3020
3021 static void
3022 tm_memopt_compute_avin (basic_block bb)
3023 {
3024 edge e;
3025 unsigned ix;
3026
3027 /* Seed with the AVOUT of any predecessor. */
3028 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3029 {
3030 e = EDGE_PRED (bb, ix);
3031       /* Make sure we have already visited this BB, so that it is
3032          initialized.
3033
3034 If e->src->aux is NULL, this predecessor is actually on an
3035 enclosing transaction. We only care about the current
3036 transaction, so ignore it. */
3037 if (e->src->aux && BB_VISITED_P (e->src))
3038 {
3039 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3040 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3041 break;
3042 }
3043 }
3044
3045 for (; ix < EDGE_COUNT (bb->preds); ix++)
3046 {
3047 e = EDGE_PRED (bb, ix);
3048 if (e->src->aux && BB_VISITED_P (e->src))
3049 {
3050 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3051 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3052 }
3053 }
3054
3055 BB_VISITED_P (bb) = true;
3056 }
3057
3058 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3059
3060 static void
3061 tm_memopt_compute_antin (basic_block bb)
3062 {
3063 edge e;
3064 unsigned ix;
3065
3066 /* Seed with the ANTIC_OUT of any successor. */
3067 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3068 {
3069 e = EDGE_SUCC (bb, ix);
3070       /* Make sure we have already visited this BB, so that it is
3071          initialized. */
3072 if (BB_VISITED_P (e->dest))
3073 {
3074 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3075 break;
3076 }
3077 }
3078
3079 for (; ix < EDGE_COUNT (bb->succs); ix++)
3080 {
3081 e = EDGE_SUCC (bb, ix);
3082 if (BB_VISITED_P (e->dest))
3083 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3084 }
3085
3086 BB_VISITED_P (bb) = true;
3087 }
3088
3089 /* Compute the AVAIL sets for every basic block in BLOCKS.
3090
3091 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3092
3093 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3094 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3095
3096 This is basically what we do in lcm's compute_available(), but here
3097 we calculate two sets of sets (one for STOREs and one for READs),
3098 and we work on a region instead of the entire CFG.
3099
3100 REGION is the TM region.
3101 BLOCKS are the basic blocks in the region. */
3102
3103 static void
3104 tm_memopt_compute_available (struct tm_region *region,
3105 VEC (basic_block, heap) *blocks)
3106 {
3107 edge e;
3108 basic_block *worklist, *qin, *qout, *qend, bb;
3109 unsigned int qlen, i;
3110 edge_iterator ei;
3111 bool changed;
3112
3113 /* Allocate a worklist array/queue. Entries are only added to the
3114 list if they were not already on the list. So the size is
3115 bounded by the number of basic blocks in the region. */
3116 qlen = VEC_length (basic_block, blocks) - 1;
3117 qin = qout = worklist =
3118 XNEWVEC (basic_block, qlen);
3119
3120 /* Put every block in the region on the worklist. */
3121 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3122 {
3123 /* Seed AVAIL_OUT with the LOCAL set. */
3124 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3125 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3126
3127 AVAIL_IN_WORKLIST_P (bb) = true;
3128 /* No need to insert the entry block, since it has an AVIN of
3129 null, and an AVOUT that has already been seeded in. */
3130 if (bb != region->entry_block)
3131 *qin++ = bb;
3132 }
3133
3134 /* The entry block has been initialized with the local sets. */
3135 BB_VISITED_P (region->entry_block) = true;
3136
3137 qin = worklist;
3138 qend = &worklist[qlen];
3139
3140 /* Iterate until the worklist is empty. */
3141 while (qlen)
3142 {
3143 /* Take the first entry off the worklist. */
3144 bb = *qout++;
3145 qlen--;
3146
3147 if (qout >= qend)
3148 qout = worklist;
3149
3150 /* This block can be added to the worklist again if necessary. */
3151 AVAIL_IN_WORKLIST_P (bb) = false;
3152 tm_memopt_compute_avin (bb);
3153
3154 /* Note: We do not add the LOCAL sets here because we already
3155 seeded the AVAIL_OUT sets with them. */
3156 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3157 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3158 if (changed
3159 && (region->exit_blocks == NULL
3160 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3161 /* If the out state of this block changed, then we need to add
3162 its successors to the worklist if they are not already in. */
3163 FOR_EACH_EDGE (e, ei, bb->succs)
3164 if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
3165 {
3166 *qin++ = e->dest;
3167 AVAIL_IN_WORKLIST_P (e->dest) = true;
3168 qlen++;
3169
3170 if (qin >= qend)
3171 qin = worklist;
3172 }
3173 }
3174
3175 free (worklist);
3176
3177 if (dump_file)
3178 dump_tm_memopt_sets (blocks);
3179 }
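
/* A small worked example on a hypothetical diamond
   entry -> { B1, B2 } -> join, where only B1 contains a TM store to X:

	STORE_AVAIL_OUT(B1) = {X}	STORE_AVAIL_OUT(B2) = {}
	STORE_AVAIL_IN(join) = {X} intersect {} = {}

   so the store to X is not available at the join.  If both arms stored
   to X, the intersection would keep X available, and a later store to
   X in the join could be downgraded to a write-after-write below.  */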
3180
3181 /* Compute ANTIC sets for every basic block in BLOCKS.
3182
3183 We compute STORE_ANTIC_OUT as follows:
3184
3185 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3186 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3187
3188 REGION is the TM region.
3189 BLOCKS are the basic blocks in the region. */
3190
3191 static void
3192 tm_memopt_compute_antic (struct tm_region *region,
3193 VEC (basic_block, heap) *blocks)
3194 {
3195 edge e;
3196 basic_block *worklist, *qin, *qout, *qend, bb;
3197 unsigned int qlen;
3198 int i;
3199 edge_iterator ei;
3200
3201 /* Allocate a worklist array/queue. Entries are only added to the
3202 list if they were not already on the list. So the size is
3203 bounded by the number of basic blocks in the region. */
3204 qin = qout = worklist =
3205 XNEWVEC (basic_block, VEC_length (basic_block, blocks));
3206
3207 for (qlen = 0, i = VEC_length (basic_block, blocks) - 1; i >= 0; --i)
3208 {
3209 bb = VEC_index (basic_block, blocks, i);
3210
3211 /* Seed ANTIC_OUT with the LOCAL set. */
3212 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3213
3214 /* Put every block in the region on the worklist. */
3215 AVAIL_IN_WORKLIST_P (bb) = true;
3216 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3217 and their ANTIC_OUT has already been seeded in. */
3218 if (region->exit_blocks
3219 && !bitmap_bit_p (region->exit_blocks, bb->index))
3220 {
3221 qlen++;
3222 *qin++ = bb;
3223 }
3224 }
3225
3226 /* The exit blocks have been initialized with the local sets. */
3227 if (region->exit_blocks)
3228 {
3229 unsigned int i;
3230 bitmap_iterator bi;
3231 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3232 BB_VISITED_P (BASIC_BLOCK (i)) = true;
3233 }
3234
3235 qin = worklist;
3236 qend = &worklist[qlen];
3237
3238 /* Iterate until the worklist is empty. */
3239 while (qlen)
3240 {
3241 /* Take the first entry off the worklist. */
3242 bb = *qout++;
3243 qlen--;
3244
3245 if (qout >= qend)
3246 qout = worklist;
3247
3248 /* This block can be added to the worklist again if necessary. */
3249 AVAIL_IN_WORKLIST_P (bb) = false;
3250 tm_memopt_compute_antin (bb);
3251
3252 /* Note: We do not add the LOCAL sets here because we already
3253 seeded the ANTIC_OUT sets with them. */
3254 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3255 && bb != region->entry_block)
3256 /* If the out state of this block changed, then we need to add
3257 its predecessors to the worklist if they are not already in. */
3258 FOR_EACH_EDGE (e, ei, bb->preds)
3259 if (!AVAIL_IN_WORKLIST_P (e->src))
3260 {
3261 *qin++ = e->src;
3262 AVAIL_IN_WORKLIST_P (e->src) = true;
3263 qlen++;
3264
3265 if (qin >= qend)
3266 qin = worklist;
3267 }
3268 }
3269
3270 free (worklist);
3271
3272 if (dump_file)
3273 dump_tm_memopt_sets (blocks);
3274 }
3275
3276 /* Offsets of load variants from TM_LOAD. For example,
3277 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3278 See gtm-builtins.def. */
3279 #define TRANSFORM_RAR 1
3280 #define TRANSFORM_RAW 2
3281 #define TRANSFORM_RFW 3
3282 /* Offsets of store variants from TM_STORE. */
3283 #define TRANSFORM_WAR 1
3284 #define TRANSFORM_WAW 2
3285
3286 /* Inform about a load/store optimization. */
3287
3288 static void
3289 dump_tm_memopt_transform (gimple stmt)
3290 {
3291 if (dump_file)
3292 {
3293 fprintf (dump_file, "TM memopt: transforming: ");
3294 print_gimple_stmt (dump_file, stmt, 0, 0);
3295 fprintf (dump_file, "\n");
3296 }
3297 }
3298
3299 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3300 by a builtin that is OFFSET entries down in the builtins table in
3301 gtm-builtins.def. */
3302
3303 static void
3304 tm_memopt_transform_stmt (unsigned int offset,
3305 gimple stmt,
3306 gimple_stmt_iterator *gsi)
3307 {
3308 tree fn = gimple_call_fn (stmt);
3309 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3310 TREE_OPERAND (fn, 0)
3311 = builtin_decl_explicit ((enum built_in_function)
3312 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3313 + offset));
3314 gimple_call_set_fn (stmt, fn);
3315 gsi_replace (gsi, stmt, true);
3316 dump_tm_memopt_transform (stmt);
3317 }
3318
3319 /* Perform the actual TM memory optimization transformations in the
3320 basic blocks in BLOCKS. */
3321
3322 static void
3323 tm_memopt_transform_blocks (VEC (basic_block, heap) *blocks)
3324 {
3325 size_t i;
3326 basic_block bb;
3327 gimple_stmt_iterator gsi;
3328
3329 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3330 {
3331 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3332 {
3333 gimple stmt = gsi_stmt (gsi);
3334 bitmap read_avail = READ_AVAIL_IN (bb);
3335 bitmap store_avail = STORE_AVAIL_IN (bb);
3336 bitmap store_antic = STORE_ANTIC_OUT (bb);
3337 unsigned int loc;
3338
3339 if (is_tm_simple_load (stmt))
3340 {
3341 loc = tm_memopt_value_number (stmt, NO_INSERT);
3342 if (store_avail && bitmap_bit_p (store_avail, loc))
3343 tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
3344 else if (store_antic && bitmap_bit_p (store_antic, loc))
3345 {
3346 tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
3347 bitmap_set_bit (store_avail, loc);
3348 }
3349 else if (read_avail && bitmap_bit_p (read_avail, loc))
3350 tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
3351 else
3352 bitmap_set_bit (read_avail, loc);
3353 }
3354 else if (is_tm_simple_store (stmt))
3355 {
3356 loc = tm_memopt_value_number (stmt, NO_INSERT);
3357 if (store_avail && bitmap_bit_p (store_avail, loc))
3358 tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
3359 else
3360 {
3361 if (read_avail && bitmap_bit_p (read_avail, loc))
3362 tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
3363 bitmap_set_bit (store_avail, loc);
3364 }
3365 }
3366 }
3367 }
3368 }
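
/* A hedged example of the rewrites above, all within one basic block
   of a transaction (builtin names per gtm-builtins.def, shown only for
   illustration):

	__builtin__ITM_WU4 (&x, 1);	   x becomes store-available
	tmp = __builtin__ITM_RU4 (&x);	   -> read-after-write variant
	__builtin__ITM_WU4 (&x, 2);	   -> write-after-write variant

   The replacement is done by bumping the original builtin's
   DECL_FUNCTION_CODE by the matching TRANSFORM_* offset, relying on
   the ordering of entries in gtm-builtins.def.  */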
3369
3370 /* Return a new set of bitmaps for a BB. */
3371
3372 static struct tm_memopt_bitmaps *
3373 tm_memopt_init_sets (void)
3374 {
3375 struct tm_memopt_bitmaps *b
3376 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3377 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3378 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3379 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3380 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3382 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3383 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3384 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3385 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3386 return b;
3387 }
3388
3389 /* Free sets computed for each BB. */
3390
3391 static void
3392 tm_memopt_free_sets (VEC (basic_block, heap) *blocks)
3393 {
3394 size_t i;
3395 basic_block bb;
3396
3397 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3398 bb->aux = NULL;
3399 }
3400
3401 /* Clear the visited bit for every basic block in BLOCKS. */
3402
3403 static void
3404 tm_memopt_clear_visited (VEC (basic_block, heap) *blocks)
3405 {
3406 size_t i;
3407 basic_block bb;
3408
3409 for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
3410 BB_VISITED_P (bb) = false;
3411 }
3412
3413 /* Replace TM load/stores with hints for the runtime. We handle
3414 things like read-after-write, write-after-read, read-after-read,
3415 read-for-write, etc. */
3416
3417 static unsigned int
3418 execute_tm_memopt (void)
3419 {
3420 struct tm_region *region;
3421 VEC (basic_block, heap) *bbs;
3422
3423 tm_memopt_value_id = 0;
3424 tm_memopt_value_numbers = htab_create (10, tm_memop_hash, tm_memop_eq, free);
3425
3426 for (region = all_tm_regions; region; region = region->next)
3427 {
3428 /* All the TM stores/loads in the current region. */
3429 size_t i;
3430 basic_block bb;
3431
3432 bitmap_obstack_initialize (&tm_memopt_obstack);
3433
3434 /* Save all BBs for the current region. */
3435 bbs = get_tm_region_blocks (region->entry_block,
3436 region->exit_blocks,
3437 region->irr_blocks,
3438 NULL,
3439 false);
3440
3441 /* Collect all the memory operations. */
3442 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
3443 {
3444 bb->aux = tm_memopt_init_sets ();
3445 tm_memopt_accumulate_memops (bb);
3446 }
3447
3448 /* Solve data flow equations and transform each block accordingly. */
3449 tm_memopt_clear_visited (bbs);
3450 tm_memopt_compute_available (region, bbs);
3451 tm_memopt_clear_visited (bbs);
3452 tm_memopt_compute_antic (region, bbs);
3453 tm_memopt_transform_blocks (bbs);
3454
3455 tm_memopt_free_sets (bbs);
3456 VEC_free (basic_block, heap, bbs);
3457 bitmap_obstack_release (&tm_memopt_obstack);
3458 htab_empty (tm_memopt_value_numbers);
3459 }
3460
3461 htab_delete (tm_memopt_value_numbers);
3462 return 0;
3463 }
3464
3465 static bool
3466 gate_tm_memopt (void)
3467 {
3468 return flag_tm && optimize > 0;
3469 }
3470
3471 struct gimple_opt_pass pass_tm_memopt =
3472 {
3473 {
3474 GIMPLE_PASS,
3475 "tmmemopt", /* name */
3476 gate_tm_memopt, /* gate */
3477 execute_tm_memopt, /* execute */
3478 NULL, /* sub */
3479 NULL, /* next */
3480 0, /* static_pass_number */
3481 TV_TRANS_MEM, /* tv_id */
3482 PROP_ssa | PROP_cfg, /* properties_required */
3483 0, /* properties_provided */
3484 0, /* properties_destroyed */
3485 0, /* todo_flags_start */
3486 0, /* todo_flags_finish */
3487 }
3488 };
3489
3490 \f
3491 /* Interprocedural analysis for the creation of transactional clones.
3492 The aim of this pass is to find which functions are referenced in
3493 a non-irrevocable transaction context, and for those over which
3494 we have control (or user directive), create a version of the
3495 function which uses only the transactional interface to reference
3496 protected memories. This analysis proceeds in several steps:
3497
3498 (1) Collect the set of all possible transactional clones:
3499
3500 (a) For each local public function marked tm_callable, push it
3501        onto the tm_callee queue.
3502
3503 (b) For all local functions, scan for calls in transaction blocks.
3504 Push the caller and callee onto the tm_caller and tm_callee
3505 queues. Count the number of callers for each callee.
3506
3507 (c) For each local function on the callee list, assume we will
3508 create a transactional clone. Push *all* calls onto the
3509 callee queues; count the number of clone callers separately
3510 to the number of original callers.
3511        from the number of original callers.
3512 (2) Propagate irrevocable status up the dominator tree:
3513
3514 (a) Any external function on the callee list that is not marked
3515 tm_callable is irrevocable. Push all callers of such onto
3516 a worklist.
3517
3518 (b) For each function on the worklist, mark each block that
3519 contains an irrevocable call. Use the AND operator to
3520 propagate that mark up the dominator tree.
3521
3522 (c) If we reach the entry block for a possible transactional
3523 clone, then the transactional clone is irrevocable, and
3524 we should not create the clone after all. Push all
3525 callers onto the worklist.
3526
3527 (d) Place tm_irrevocable calls at the beginning of the relevant
3528 blocks. Special case here is the entry block for the entire
3529 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
3530 the library to begin the region in serial mode. Decrement
3531 the call count for all callees in the irrevocable region.
3532
3533 (3) Create the transactional clones:
3534
3535 Any tm_callee that still has a non-zero call count is cloned.
3536 */
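
/* For instance (a sketch, not an actual test case):

	__attribute__((transaction_callable))
	void inc (int *p) { ++*p; }

	void f (int *p)
	{
	  __transaction_atomic { inc (p); }
	}

   Step (1) queues INC both for its attribute and because it is called
   from inside a transaction in F; step (2) finds nothing irrevocable
   in its body; step (3) therefore creates a transactional clone of INC,
   and the call inside the transaction is redirected to that clone.  */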
3537
3538 /* This structure is stored in the AUX field of each cgraph_node. */
3539 struct tm_ipa_cg_data
3540 {
3541 /* The clone of the function that got created. */
3542 struct cgraph_node *clone;
3543
3544 /* The tm regions in the normal function. */
3545 struct tm_region *all_tm_regions;
3546
3547 /* The blocks of the normal/clone functions that contain irrevocable
3548 calls, or blocks that are post-dominated by irrevocable calls. */
3549 bitmap irrevocable_blocks_normal;
3550 bitmap irrevocable_blocks_clone;
3551
3552 /* The blocks of the normal function that are involved in transactions. */
3553 bitmap transaction_blocks_normal;
3554
3555 /* The number of callers to the transactional clone of this function
3556 from normal and transactional clones respectively. */
3557 unsigned tm_callers_normal;
3558 unsigned tm_callers_clone;
3559
3560 /* True if all calls to this function's transactional clone
3561 are irrevocable. Also automatically true if the function
3562 has no transactional clone. */
3563 bool is_irrevocable;
3564
3565 /* Flags indicating the presence of this function in various queues. */
3566 bool in_callee_queue;
3567 bool in_worklist;
3568
3569 /* Flags indicating the kind of scan desired while in the worklist. */
3570 bool want_irr_scan_normal;
3571 };
3572
3573 typedef struct cgraph_node *cgraph_node_p;
3574
3575 DEF_VEC_P (cgraph_node_p);
3576 DEF_VEC_ALLOC_P (cgraph_node_p, heap);
3577
3578 typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
3579
3580 /* Return the ipa data associated with NODE, allocating zeroed memory
3581 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
3582 and set *NODE accordingly. */
3583
3584 static struct tm_ipa_cg_data *
3585 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
3586 {
3587 struct tm_ipa_cg_data *d;
3588
3589 if (traverse_aliases && (*node)->alias)
3590 *node = cgraph_get_node ((*node)->thunk.alias);
3591
3592 d = (struct tm_ipa_cg_data *) (*node)->symbol.aux;
3593
3594 if (d == NULL)
3595 {
3596 d = (struct tm_ipa_cg_data *)
3597 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
3598 (*node)->symbol.aux = (void *) d;
3599 memset (d, 0, sizeof (*d));
3600 }
3601
3602 return d;
3603 }
3604
3605 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
3606 it is already present. */
3607
3608 static void
3609 maybe_push_queue (struct cgraph_node *node,
3610 cgraph_node_queue *queue_p, bool *in_queue_p)
3611 {
3612 if (!*in_queue_p)
3613 {
3614 *in_queue_p = true;
3615 VEC_safe_push (cgraph_node_p, heap, *queue_p, node);
3616 }
3617 }
3618
3619 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
3620 Queue all callees within block BB. */
3621
3622 static void
3623 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
3624 basic_block bb, bool for_clone)
3625 {
3626 gimple_stmt_iterator gsi;
3627
3628 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3629 {
3630 gimple stmt = gsi_stmt (gsi);
3631 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
3632 {
3633 tree fndecl = gimple_call_fndecl (stmt);
3634 if (fndecl)
3635 {
3636 struct tm_ipa_cg_data *d;
3637 unsigned *pcallers;
3638 struct cgraph_node *node;
3639
3640 if (is_tm_ending_fndecl (fndecl))
3641 continue;
3642 if (find_tm_replacement_function (fndecl))
3643 continue;
3644
3645 node = cgraph_get_node (fndecl);
3646 gcc_assert (node != NULL);
3647 d = get_cg_data (&node, true);
3648
3649 pcallers = (for_clone ? &d->tm_callers_clone
3650 : &d->tm_callers_normal);
3651 *pcallers += 1;
3652
3653 maybe_push_queue (node, callees_p, &d->in_callee_queue);
3654 }
3655 }
3656 }
3657 }
3658
3659 /* Scan all calls in NODE that are within a transaction region,
3660 and push the resulting nodes into the callee queue. */
3661
3662 static void
3663 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
3664 cgraph_node_queue *callees_p)
3665 {
3666 struct tm_region *r;
3667
3668 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
3669 d->all_tm_regions = all_tm_regions;
3670
3671 for (r = all_tm_regions; r; r = r->next)
3672 {
3673 VEC (basic_block, heap) *bbs;
3674 basic_block bb;
3675 unsigned i;
3676
3677 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
3678 d->transaction_blocks_normal, false);
3679
3680 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
3681 ipa_tm_scan_calls_block (callees_p, bb, false);
3682
3683 VEC_free (basic_block, heap, bbs);
3684 }
3685 }
3686
3687 /* Scan all calls in NODE as if this is the transactional clone,
3688 and push the destinations into the callee queue. */
3689
3690 static void
3691 ipa_tm_scan_calls_clone (struct cgraph_node *node,
3692 cgraph_node_queue *callees_p)
3693 {
3694 struct function *fn = DECL_STRUCT_FUNCTION (node->symbol.decl);
3695 basic_block bb;
3696
3697 FOR_EACH_BB_FN (bb, fn)
3698 ipa_tm_scan_calls_block (callees_p, bb, true);
3699 }
3700
3701 /* The function NODE has been detected to be irrevocable. Push all
3702 of its callers onto WORKLIST for the purpose of re-scanning them. */
3703
3704 static void
3705 ipa_tm_note_irrevocable (struct cgraph_node *node,
3706 cgraph_node_queue *worklist_p)
3707 {
3708 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
3709 struct cgraph_edge *e;
3710
3711 d->is_irrevocable = true;
3712
3713 for (e = node->callers; e ; e = e->next_caller)
3714 {
3715 basic_block bb;
3716 struct cgraph_node *caller;
3717
3718 /* Don't examine recursive calls. */
3719 if (e->caller == node)
3720 continue;
3721 /* Even if we think we can go irrevocable, believe the user
3722 above all. */
3723 if (is_tm_safe_or_pure (e->caller->symbol.decl))
3724 continue;
3725
3726 caller = e->caller;
3727 d = get_cg_data (&caller, true);
3728
3729       /* Check whether the call to NODE occurs within a transaction in
3730          the caller; if so, schedule the caller for a normal re-scan too. */
3731 bb = gimple_bb (e->call_stmt);
3732 gcc_assert (bb != NULL);
3733 if (d->transaction_blocks_normal
3734 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
3735 d->want_irr_scan_normal = true;
3736
3737 maybe_push_queue (caller, worklist_p, &d->in_worklist);
3738 }
3739 }
3740
3741 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
3742 within the block is irrevocable. */
3743
3744 static bool
3745 ipa_tm_scan_irr_block (basic_block bb)
3746 {
3747 gimple_stmt_iterator gsi;
3748 tree fn;
3749
3750 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3751 {
3752 gimple stmt = gsi_stmt (gsi);
3753 switch (gimple_code (stmt))
3754 {
3755 case GIMPLE_CALL:
3756 if (is_tm_pure_call (stmt))
3757 break;
3758
3759 fn = gimple_call_fn (stmt);
3760
3761 /* Functions with the attribute are by definition irrevocable. */
3762 if (is_tm_irrevocable (fn))
3763 return true;
3764
3765 /* For direct function calls, go ahead and check for replacement
3766 functions, or transitive irrevocable functions. For indirect
3767 functions, we'll ask the runtime. */
3768 if (TREE_CODE (fn) == ADDR_EXPR)
3769 {
3770 struct tm_ipa_cg_data *d;
3771 struct cgraph_node *node;
3772
3773 fn = TREE_OPERAND (fn, 0);
3774 if (is_tm_ending_fndecl (fn))
3775 break;
3776 if (find_tm_replacement_function (fn))
3777 break;
3778
3779 node = cgraph_get_node (fn);
3780 d = get_cg_data (&node, true);
3781
3782 /* Return true if irrevocable, but above all, believe
3783 the user. */
3784 if (d->is_irrevocable
3785 && !is_tm_safe_or_pure (fn))
3786 return true;
3787 }
3788 break;
3789
3790 case GIMPLE_ASM:
3791 /* ??? The Approved Method of indicating that an inline
3792 assembly statement is not relevant to the transaction
3793 is to wrap it in a __tm_waiver block. This is not
3794 yet implemented, so we can't check for it. */
3795 if (is_tm_safe (current_function_decl))
3796 {
3797 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
3798 SET_EXPR_LOCATION (t, gimple_location (stmt));
3799 TREE_BLOCK (t) = gimple_block (stmt);
3800 error ("%Kasm not allowed in %<transaction_safe%> function", t);
3801 }
3802 return true;
3803
3804 default:
3805 break;
3806 }
3807 }
3808
3809 return false;
3810 }
3811
3812 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
3813 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
3814 scanning past OLD_IRR or EXIT_BLOCKS. Return true if any were found. */
3815
3816 static bool
3817 ipa_tm_scan_irr_blocks (VEC (basic_block, heap) **pqueue, bitmap new_irr,
3818 bitmap old_irr, bitmap exit_blocks)
3819 {
3820 bool any_new_irr = false;
3821 edge e;
3822 edge_iterator ei;
3823 bitmap visited_blocks = BITMAP_ALLOC (NULL);
3824
3825 do
3826 {
3827 basic_block bb = VEC_pop (basic_block, *pqueue);
3828
3829 /* Don't re-scan blocks we know already are irrevocable. */
3830 if (old_irr && bitmap_bit_p (old_irr, bb->index))
3831 continue;
3832
3833 if (ipa_tm_scan_irr_block (bb))
3834 {
3835 bitmap_set_bit (new_irr, bb->index);
3836 any_new_irr = true;
3837 }
3838 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
3839 {
3840 FOR_EACH_EDGE (e, ei, bb->succs)
3841 if (!bitmap_bit_p (visited_blocks, e->dest->index))
3842 {
3843 bitmap_set_bit (visited_blocks, e->dest->index);
3844 VEC_safe_push (basic_block, heap, *pqueue, e->dest);
3845 }
3846 }
3847 }
3848 while (!VEC_empty (basic_block, *pqueue));
3849
3850 BITMAP_FREE (visited_blocks);
3851
3852 return any_new_irr;
3853 }
3854
3855 /* Propagate the irrevocable property both up and down the dominator tree.
3856 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
3857 are the exit blocks of the TM regions; OLD_IRR holds the results of a
3858 previous scan of the dominator tree which have been fully propagated;
3859 NEW_IRR is the set of blocks gaining the irrevocable property now. */
3860
3861 static void
3862 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
3863 bitmap old_irr, bitmap exit_blocks)
3864 {
3865 VEC (basic_block, heap) *bbs;
3866 bitmap all_region_blocks;
3867
3868 /* If this block is in the old set, no need to rescan. */
3869 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
3870 return;
3871
3872 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
3873 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
3874 all_region_blocks, false);
3875 do
3876 {
3877 basic_block bb = VEC_pop (basic_block, bbs);
3878 bool this_irr = bitmap_bit_p (new_irr, bb->index);
3879 bool all_son_irr = false;
3880 edge_iterator ei;
3881 edge e;
3882
3883 /* Propagate up. If all of my successors are irrevocable, I am too,
3884 but there must be at least one successor that is. */
3885 if (!this_irr)
3886 {
3887 FOR_EACH_EDGE (e, ei, bb->succs)
3888 {
3889 if (!bitmap_bit_p (new_irr, e->dest->index))
3890 {
3891 all_son_irr = false;
3892 break;
3893 }
3894 else
3895 all_son_irr = true;
3896 }
3897 if (all_son_irr)
3898 {
3899 /* Add block to new_irr if it hasn't already been processed. */
3900 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
3901 {
3902 bitmap_set_bit (new_irr, bb->index);
3903 this_irr = true;
3904 }
3905 }
3906 }
3907
3908 /* Propagate down to everyone we immediately dominate. */
3909 if (this_irr)
3910 {
3911 basic_block son;
3912 for (son = first_dom_son (CDI_DOMINATORS, bb);
3913 son;
3914 son = next_dom_son (CDI_DOMINATORS, son))
3915 {
3916 /* Make sure block is actually in a TM region, and it
3917 isn't already in old_irr. */
3918 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
3919 && bitmap_bit_p (all_region_blocks, son->index))
3920 bitmap_set_bit (new_irr, son->index);
3921 }
3922 }
3923 }
3924 while (!VEC_empty (basic_block, bbs));
3925
3926 BITMAP_FREE (all_region_blocks);
3927 VEC_free (basic_block, heap, bbs);
3928 }
3929
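/* Subroutine of ipa_tm_scan_irr_function.  BB has been found to be
   irrevocable, so for each direct call within it undo the counting done
   by ipa_tm_scan_calls_block: decrement the callee's tm_callers_clone or
   tm_callers_normal count, as selected by FOR_CLONE.  */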
3930 static void
3931 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
3932 {
3933 gimple_stmt_iterator gsi;
3934
3935 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3936 {
3937 gimple stmt = gsi_stmt (gsi);
3938 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
3939 {
3940 tree fndecl = gimple_call_fndecl (stmt);
3941 if (fndecl)
3942 {
3943 struct tm_ipa_cg_data *d;
3944 unsigned *pcallers;
3945 struct cgraph_node *tnode;
3946
3947 if (is_tm_ending_fndecl (fndecl))
3948 continue;
3949 if (find_tm_replacement_function (fndecl))
3950 continue;
3951
3952 tnode = cgraph_get_node (fndecl);
3953 d = get_cg_data (&tnode, true);
3954
3955 pcallers = (for_clone ? &d->tm_callers_clone
3956 : &d->tm_callers_normal);
3957
3958 gcc_assert (*pcallers > 0);
3959 *pcallers -= 1;
3960 }
3961 }
3962 }
3963 }
3964
3965 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
3966 as well as other irrevocable actions such as inline assembly. Mark all
3967 such blocks as irrevocable and decrement the number of calls to
3968 transactional clones. Return true if, for the transactional clone, the
3969 entire function is irrevocable. */
3970
3971 static bool
3972 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
3973 {
3974 struct tm_ipa_cg_data *d;
3975 bitmap new_irr, old_irr;
3976 VEC (basic_block, heap) *queue;
3977 bool ret = false;
3978
3979 /* Builtin operators (operator new, and such) have no function body to scan. */
3980 if (DECL_STRUCT_FUNCTION (node->symbol.decl) == NULL
3981 || DECL_STRUCT_FUNCTION (node->symbol.decl)->cfg == NULL)
3982 return false;
3983
3984 current_function_decl = node->symbol.decl;
3985 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
3986 calculate_dominance_info (CDI_DOMINATORS);
3987
3988 d = get_cg_data (&node, true);
3989 queue = VEC_alloc (basic_block, heap, 10);
3990 new_irr = BITMAP_ALLOC (&tm_obstack);
3991
3992 /* Scan each tm region, propagating irrevocable status through the tree. */
3993 if (for_clone)
3994 {
3995 old_irr = d->irrevocable_blocks_clone;
3996 VEC_quick_push (basic_block, queue, single_succ (ENTRY_BLOCK_PTR));
3997 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
3998 {
3999 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
4000 old_irr, NULL);
4001 ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
4002 }
4003 }
4004 else
4005 {
4006 struct tm_region *region;
4007
4008 old_irr = d->irrevocable_blocks_normal;
4009 for (region = d->all_tm_regions; region; region = region->next)
4010 {
4011 VEC_quick_push (basic_block, queue, region->entry_block);
4012 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4013 region->exit_blocks))
4014 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4015 region->exit_blocks);
4016 }
4017 }
4018
4019 /* If we found any new irrevocable blocks, reduce the call count for
4020 transactional clones within the irrevocable blocks. Save the new
4021 set of irrevocable blocks for next time. */
4022 if (!bitmap_empty_p (new_irr))
4023 {
4024 bitmap_iterator bmi;
4025 unsigned i;
4026
4027 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4028 ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
4029
4030 if (old_irr)
4031 {
4032 bitmap_ior_into (old_irr, new_irr);
4033 BITMAP_FREE (new_irr);
4034 }
4035 else if (for_clone)
4036 d->irrevocable_blocks_clone = new_irr;
4037 else
4038 d->irrevocable_blocks_normal = new_irr;
4039
4040 if (dump_file && new_irr)
4041 {
4042 const char *dname;
4043 bitmap_iterator bmi;
4044 unsigned i;
4045
4046 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4047 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4048 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4049 }
4050 }
4051 else
4052 BITMAP_FREE (new_irr);
4053
4054 VEC_free (basic_block, heap, queue);
4055 pop_cfun ();
4056 current_function_decl = NULL;
4057
4058 return ret;
4059 }
4060
4061 /* Return true if, for the transactional clone of NODE, any call
4062 may enter irrevocable mode. */
4063
4064 static bool
4065 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4066 {
4067 struct tm_ipa_cg_data *d;
4068 tree decl;
4069 unsigned flags;
4070
4071 d = get_cg_data (&node, true);
4072 decl = node->symbol.decl;
4073 flags = flags_from_decl_or_type (decl);
4074
4075 /* Handle some TM builtins. Ordinarily these aren't actually generated
4076 at this point, but handling these functions when written in by the
4077 user makes it easier to build unit tests. */
4078 if (flags & ECF_TM_BUILTIN)
4079 return false;
4080
4081 /* Filter out all functions that are marked. */
4082 if (flags & ECF_TM_PURE)
4083 return false;
4084 if (is_tm_safe (decl))
4085 return false;
4086 if (is_tm_irrevocable (decl))
4087 return true;
4088 if (is_tm_callable (decl))
4089 return true;
4090 if (find_tm_replacement_function (decl))
4091 return true;
4092
4093 /* If we aren't seeing the final version of the function we don't
4094 know what it will contain at runtime. */
4095 if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
4096 return true;
4097
4098 /* If the function must go irrevocable, then of course true. */
4099 if (d->is_irrevocable)
4100 return true;
4101
4102 /* If there are any blocks marked irrevocable, then the function
4103 as a whole may enter irrevocable. */
4104 if (d->irrevocable_blocks_clone)
4105 return true;
4106
4107 /* We may have previously marked this function as tm_may_enter_irr;
4108 see pass_diagnose_tm_blocks. */
4109 if (node->local.tm_may_enter_irr)
4110 return true;
4111
4112 /* Recurse on the main body for aliases. In general, this will
4113 result in one of the bits above being set so that we will not
4114 have to recurse next time. */
4115 if (node->alias)
4116 return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
4117
4118 /* What remains are unmarked local functions with nothing that forces
4119 the function to go irrevocable. */
4120 return false;
4121 }
4122
4123 /* Diagnose calls from transaction_safe functions to unmarked
4124 functions that are determined to not be safe. */
4125
4126 static void
4127 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4128 {
4129 struct cgraph_edge *e;
4130
4131 for (e = node->callees; e ; e = e->next_callee)
4132 if (!is_tm_callable (e->callee->symbol.decl)
4133 && e->callee->local.tm_may_enter_irr)
4134 error_at (gimple_location (e->call_stmt),
4135 "unsafe function call %qD within "
4136 "%<transaction_safe%> function", e->callee->symbol.decl);
4137 }
4138
4139 /* Diagnose calls from atomic transactions to unmarked functions
4140 that are determined to not be safe. */
4141
4142 static void
4143 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4144 struct tm_region *all_tm_regions)
4145 {
4146 struct tm_region *r;
4147
4148 for (r = all_tm_regions; r ; r = r->next)
4149 if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
4150 {
4151 /* Atomic transactions can be nested inside relaxed. */
4152 if (r->inner)
4153 ipa_tm_diagnose_transaction (node, r->inner);
4154 }
4155 else
4156 {
4157 VEC (basic_block, heap) *bbs;
4158 gimple_stmt_iterator gsi;
4159 basic_block bb;
4160 size_t i;
4161
4162 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4163 r->irr_blocks, NULL, false);
4164
4165 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
4166 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4167 {
4168 gimple stmt = gsi_stmt (gsi);
4169 tree fndecl;
4170
4171 if (gimple_code (stmt) == GIMPLE_ASM)
4172 {
4173 error_at (gimple_location (stmt),
4174 "asm not allowed in atomic transaction");
4175 continue;
4176 }
4177
4178 if (!is_gimple_call (stmt))
4179 continue;
4180 fndecl = gimple_call_fndecl (stmt);
4181
4182 /* Indirect function calls have been diagnosed already. */
4183 if (!fndecl)
4184 continue;
4185
4186 /* Stop at the end of the transaction. */
4187 if (is_tm_ending_fndecl (fndecl))
4188 {
4189 if (bitmap_bit_p (r->exit_blocks, bb->index))
4190 break;
4191 continue;
4192 }
4193
4194 /* Marked functions have been diagnosed already. */
4195 if (is_tm_pure_call (stmt))
4196 continue;
4197 if (is_tm_callable (fndecl))
4198 continue;
4199
4200 if (cgraph_local_info (fndecl)->tm_may_enter_irr)
4201 error_at (gimple_location (stmt),
4202 "unsafe function call %qD within "
4203 "atomic transaction", fndecl);
4204 }
4205
4206 VEC_free (basic_block, heap, bbs);
4207 }
4208 }
4209
4210 /* Return a transactionally mangled version of the assembler name
4211 OLD_ASM_ID. The returned value is an IDENTIFIER_NODE, so the
4212 caller need not free anything. */
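/* For illustration (following the code below): a plain C identifier such
   as "foo" is wrapped as "_ZGTt3foo", while an already-mangled C++ name
   such as "_Z3barv" loses its "_Z" prefix and becomes "_ZGTt3barv".  */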
4213
4214 static tree
4215 tm_mangle (tree old_asm_id)
4216 {
4217 const char *old_asm_name;
4218 char *tm_name;
4219 void *alloc = NULL;
4220 struct demangle_component *dc;
4221 tree new_asm_id;
4222
4223 /* Determine if the symbol is already a valid C++ mangled name. Do this
4224 even for C, which might be interfacing with C++ code via appropriately
4225 ugly identifiers. */
4226 /* ??? We could probably do just as well checking for "_Z" and be done. */
4227 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4228 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4229
4230 if (dc == NULL)
4231 {
4232 char length[8];
4233
4234 do_unencoded:
4235 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4236 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4237 }
4238 else
4239 {
4240 old_asm_name += 2; /* Skip _Z */
4241
4242 switch (dc->type)
4243 {
4244 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4245 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4246 /* Don't play silly games, you! */
4247 goto do_unencoded;
4248
4249 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4250 /* I'd really like to know if we can ever be passed one of
4251 these from the C++ front end. The Logical Thing would
4252 seem that hidden-alias should be outer-most, so that we
4253 get hidden-alias of a transaction-clone and not vice-versa. */
4254 old_asm_name += 2;
4255 break;
4256
4257 default:
4258 break;
4259 }
4260
4261 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4262 }
4263 free (alloc);
4264
4265 new_asm_id = get_identifier (tm_name);
4266 free (tm_name);
4267
4268 return new_asm_id;
4269 }
4270
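/* Mark NODE (a freshly created transactional clone or alias) as needing
   to be output, so that it is not discarded before the remaining IPA
   passes get to see it.  */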
4271 static inline void
4272 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4273 {
4274 cgraph_mark_force_output_node (node);
4275 /* ??? function_and_variable_visibility will reset
4276 the needed bit, without actually checking. */
4277 node->analyzed = 1;
4278 }
4279
4280 /* Callback data for ipa_tm_create_version_alias. */
4281 struct create_version_alias_info
4282 {
4283 struct cgraph_node *old_node;
4284 tree new_decl;
4285 };
4286
4287 /* A subroutine of ipa_tm_create_version, called via
4288 cgraph_for_node_and_aliases. Create new tm clones for each of
4289 the existing aliases. */
4290 static bool
4291 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4292 {
4293 struct create_version_alias_info *info
4294 = (struct create_version_alias_info *)data;
4295 tree old_decl, new_decl, tm_name;
4296 struct cgraph_node *new_node;
4297
4298 if (!node->same_body_alias)
4299 return false;
4300
4301 old_decl = node->symbol.decl;
4302 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4303 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4304 TREE_CODE (old_decl), tm_name,
4305 TREE_TYPE (old_decl));
4306
4307 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4308 SET_DECL_RTL (new_decl, NULL);
4309
4310 /* Based loosely on C++'s make_alias_for(). */
4311 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4312 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4313 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4314 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4315 DECL_EXTERNAL (new_decl) = 0;
4316 DECL_ARTIFICIAL (new_decl) = 1;
4317 TREE_ADDRESSABLE (new_decl) = 1;
4318 TREE_USED (new_decl) = 1;
4319 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4320
4321 /* Perform the same remapping to the comdat group. */
4322 if (DECL_ONE_ONLY (new_decl))
4323 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4324
4325 new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
4326 new_node->tm_clone = true;
4327 new_node->symbol.externally_visible = info->old_node->symbol.externally_visible;
4328 /* ?? Do not traverse aliases here. */
4329 get_cg_data (&node, false)->clone = new_node;
4330
4331 record_tm_clone_pair (old_decl, new_decl);
4332
4333 if (info->old_node->symbol.force_output
4334 || ipa_ref_list_first_referring (&info->old_node->symbol.ref_list))
4335 ipa_tm_mark_force_output_node (new_node);
4336 return false;
4337 }
4338
4339 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4340 appropriate for the transactional clone. */
4341
4342 static void
4343 ipa_tm_create_version (struct cgraph_node *old_node)
4344 {
4345 tree new_decl, old_decl, tm_name;
4346 struct cgraph_node *new_node;
4347
4348 old_decl = old_node->symbol.decl;
4349 new_decl = copy_node (old_decl);
4350
4351 /* DECL_ASSEMBLER_NAME needs to be set before we call
4352 cgraph_copy_node_for_versioning below, because cgraph_node will
4353 fill the assembler_name_hash. */
4354 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4355 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4356 SET_DECL_RTL (new_decl, NULL);
4357 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4358
4359 /* Perform the same remapping to the comdat group. */
4360 if (DECL_ONE_ONLY (new_decl))
4361 DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
4362
4363 new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
4364 new_node->symbol.externally_visible = old_node->symbol.externally_visible;
4365 new_node->lowered = true;
4366 new_node->tm_clone = 1;
4367 get_cg_data (&old_node, true)->clone = new_node;
4368
4369 if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
4370 {
4371 /* Remap extern inline to static inline. */
4372 /* ??? Is it worth trying to use make_decl_one_only? */
4373 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
4374 {
4375 DECL_EXTERNAL (new_decl) = 0;
4376 TREE_PUBLIC (new_decl) = 0;
4377 DECL_WEAK (new_decl) = 0;
4378 }
4379
4380 tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
4381 NULL, NULL);
4382 }
4383
4384 record_tm_clone_pair (old_decl, new_decl);
4385
4386 cgraph_call_function_insertion_hooks (new_node);
4387 if (old_node->symbol.force_output
4388 || ipa_ref_list_first_referring (&old_node->symbol.ref_list))
4389 ipa_tm_mark_force_output_node (new_node);
4390
4391 /* Do the same thing, but for any aliases of the original node. */
4392 {
4393 struct create_version_alias_info data;
4394 data.old_node = old_node;
4395 data.new_decl = new_decl;
4396 cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
4397 &data, true);
4398 }
4399 }
4400
4401 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
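/* The inserted call requests the serial-irrevocable mode
   (MODE_SERIALIRREVOCABLE), and the enclosing REGION is flagged with
   GTMA_MAY_ENTER_IRREVOCABLE to record that fact.  */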
4402
4403 static void
4404 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
4405 basic_block bb)
4406 {
4407 gimple_stmt_iterator gsi;
4408 gimple g;
4409
4410 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4411
4412 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
4413 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
4414
4415 split_block_after_labels (bb);
4416 gsi = gsi_after_labels (bb);
4417 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4418
4419 cgraph_create_edge (node,
4420 cgraph_get_create_node
4421 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
4422 g, 0,
4423 compute_call_stmt_bb_frequency (node->symbol.decl,
4424 gimple_bb (g)));
4425 }
4426
4427 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
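/* Conceptually, an indirect call

	x = fnptr (args);

   is rewritten as

	tmp = BUILT_IN_TM_GETTMCLONE_{SAFE,IRR} (fnptr);
	callfn = (typeof (fnptr)) tmp;
	x = callfn (args);

   so that the runtime can supply the transactional clone (or fall back
   to serial-irrevocable mode) at run time.  */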
4428
4429 static bool
4430 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
4431 struct tm_region *region,
4432 gimple_stmt_iterator *gsi, gimple stmt)
4433 {
4434 tree gettm_fn, ret, old_fn, callfn;
4435 gimple g, g2;
4436 bool safe;
4437
4438 old_fn = gimple_call_fn (stmt);
4439
4440 if (TREE_CODE (old_fn) == ADDR_EXPR)
4441 {
4442 tree fndecl = TREE_OPERAND (old_fn, 0);
4443 tree clone = get_tm_clone_pair (fndecl);
4444
4445 /* By transforming the call into a TM_GETTMCLONE, we are
4446 technically taking the address of the original function and
4447 its clone. Explain this so inlining will know this function
4448 is needed. */
4449 cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
4450 if (clone)
4451 cgraph_mark_address_taken_node (cgraph_get_node (clone));
4452 }
4453
4454 safe = is_tm_safe (TREE_TYPE (old_fn));
4455 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
4456 : BUILT_IN_TM_GETTMCLONE_IRR);
4457 ret = create_tmp_var (ptr_type_node, NULL);
4458 add_referenced_var (ret);
4459
4460 if (!safe)
4461 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4462
4463 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
4464 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
4465 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
4466
4467 g = gimple_build_call (gettm_fn, 1, old_fn);
4468 ret = make_ssa_name (ret, g);
4469 gimple_call_set_lhs (g, ret);
4470
4471 gsi_insert_before (gsi, g, GSI_SAME_STMT);
4472
4473 cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
4474 compute_call_stmt_bb_frequency (node->symbol.decl,
4475 gimple_bb (g)));
4476
4477 /* Cast return value from tm_gettmclone* into appropriate function
4478 pointer. */
4479 callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
4480 add_referenced_var (callfn);
4481 g2 = gimple_build_assign (callfn,
4482 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
4483 callfn = make_ssa_name (callfn, g2);
4484 gimple_assign_set_lhs (g2, callfn);
4485 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
4486
4487 /* ??? This is a hack to preserve the NOTHROW bit on the call,
4488 which we would have derived from the decl. Failure to save
4489 this bit means we might have to split the basic block. */
4490 if (gimple_call_nothrow_p (stmt))
4491 gimple_call_set_nothrow (stmt, true);
4492
4493 gimple_call_set_fn (stmt, callfn);
4494
4495 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
4496 for a call statement. Fix it. */
4497 {
4498 tree lhs = gimple_call_lhs (stmt);
4499 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
4500 if (lhs
4501 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
4502 {
4503 tree temp;
4504
4505 temp = make_rename_temp (rettype, 0);
4506 gimple_call_set_lhs (stmt, temp);
4507
4508 g2 = gimple_build_assign (lhs,
4509 fold_build1 (VIEW_CONVERT_EXPR,
4510 TREE_TYPE (lhs), temp));
4511 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
4512 }
4513 }
4514
4515 update_stmt (stmt);
4516
4517 return true;
4518 }
4519
4520 /* Helper function for ipa_tm_transform_calls*. Given a call
4521 statement in GSI which resides inside transaction REGION, redirect
4522 the call to either its wrapper function, or its clone. */
4523
4524 static void
4525 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
4526 struct tm_region *region,
4527 gimple_stmt_iterator *gsi,
4528 bool *need_ssa_rename_p)
4529 {
4530 gimple stmt = gsi_stmt (*gsi);
4531 struct cgraph_node *new_node;
4532 struct cgraph_edge *e = cgraph_edge (node, stmt);
4533 tree fndecl = gimple_call_fndecl (stmt);
4534
4535 /* For indirect calls, pass the address through the runtime. */
4536 if (fndecl == NULL)
4537 {
4538 *need_ssa_rename_p |=
4539 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
4540 return;
4541 }
4542
4543 /* Handle some TM builtins. Ordinarily these aren't actually generated
4544 at this point, but handling these functions when written in by the
4545 user makes it easier to build unit tests. */
4546 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
4547 return;
4548
4549 /* Fixup recursive calls inside clones. */
4550 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
4551 for recursion but not update the call statements themselves? */
4552 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
4553 {
4554 gimple_call_set_fndecl (stmt, current_function_decl);
4555 return;
4556 }
4557
4558 /* If there is a replacement, use it. */
4559 fndecl = find_tm_replacement_function (fndecl);
4560 if (fndecl)
4561 {
4562 new_node = cgraph_get_create_node (fndecl);
4563
4564 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
4565
4566 We can't do this earlier in record_tm_replacement because
4567 cgraph_remove_unreachable_nodes is called before we inject
4568 references to the node. Further, we can't do this in some
4569 nice central place in ipa_tm_execute because we don't have
4570 the exact list of wrapper functions that would be used.
4571 Marking more wrappers than necessary results in the creation
4572 of unnecessary cgraph_nodes, which can cause some of the
4573 other IPA passes to crash.
4574
4575 We do need to mark these nodes so that we get the proper
4576 result in expand_call_tm. */
4577 /* ??? This seems broken. How is it that we're marking the
4578 CALLEE as may_enter_irr? Surely we should be marking the
4579 CALLER. Also note that find_tm_replacement_function also
4580 contains mappings into the TM runtime, e.g. memcpy. These
4581 we know won't go irrevocable. */
4582 new_node->local.tm_may_enter_irr = 1;
4583 }
4584 else
4585 {
4586 struct tm_ipa_cg_data *d;
4587 struct cgraph_node *tnode = e->callee;
4588
4589 d = get_cg_data (&tnode, true);
4590 new_node = d->clone;
4591
4592 /* As we've already skipped pure calls and appropriate builtins,
4593 and we've already marked irrevocable blocks, if we can't come
4594 up with a static replacement, then ask the runtime. */
4595 if (new_node == NULL)
4596 {
4597 *need_ssa_rename_p |=
4598 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
4599 return;
4600 }
4601
4602 fndecl = new_node->symbol.decl;
4603 }
4604
4605 cgraph_redirect_edge_callee (e, new_node);
4606 gimple_call_set_fndecl (stmt, fndecl);
4607 }
4608
4609 /* Helper function for ipa_tm_transform_calls. For a given BB,
4610 install a call to tm_irrevocable if the block is in IRR_BLOCKS, and
4611 redirect other calls to the generated transactional clones. */
4612
4613 static bool
4614 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
4615 basic_block bb, bitmap irr_blocks)
4616 {
4617 gimple_stmt_iterator gsi;
4618 bool need_ssa_rename = false;
4619
4620 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
4621 {
4622 ipa_tm_insert_irr_call (node, region, bb);
4623 return true;
4624 }
4625
4626 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4627 {
4628 gimple stmt = gsi_stmt (gsi);
4629
4630 if (!is_gimple_call (stmt))
4631 continue;
4632 if (is_tm_pure_call (stmt))
4633 continue;
4634
4635 /* Redirect edges to the appropriate replacement or clone. */
4636 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
4637 }
4638
4639 return need_ssa_rename;
4640 }
4641
4642 /* Walk the CFG for REGION, beginning at BB. Install calls to
4643 tm_irrevocable when IRR_BLOCKS are reached, and redirect other calls
4644 to the generated transactional clones. */
4645
4646 static bool
4647 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
4648 basic_block bb, bitmap irr_blocks)
4649 {
4650 bool need_ssa_rename = false;
4651 edge e;
4652 edge_iterator ei;
4653 VEC(basic_block, heap) *queue = NULL;
4654 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4655
4656 VEC_safe_push (basic_block, heap, queue, bb);
4657 do
4658 {
4659 bb = VEC_pop (basic_block, queue);
4660
4661 need_ssa_rename |=
4662 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
4663
4664 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
4665 continue;
4666
4667 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
4668 continue;
4669
4670 FOR_EACH_EDGE (e, ei, bb->succs)
4671 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4672 {
4673 bitmap_set_bit (visited_blocks, e->dest->index);
4674 VEC_safe_push (basic_block, heap, queue, e->dest);
4675 }
4676 }
4677 while (!VEC_empty (basic_block, queue));
4678
4679 VEC_free (basic_block, heap, queue);
4680 BITMAP_FREE (visited_blocks);
4681
4682 return need_ssa_rename;
4683 }
4684
4685 /* Transform the calls within the TM regions within NODE. */
4686
4687 static void
4688 ipa_tm_transform_transaction (struct cgraph_node *node)
4689 {
4690 struct tm_ipa_cg_data *d;
4691 struct tm_region *region;
4692 bool need_ssa_rename = false;
4693
4694 d = get_cg_data (&node, true);
4695
4696 current_function_decl = node->symbol.decl;
4697 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
4698 calculate_dominance_info (CDI_DOMINATORS);
4699
4700 for (region = d->all_tm_regions; region; region = region->next)
4701 {
4702 /* If we're sure to go irrevocable, don't transform anything. */
4703 if (d->irrevocable_blocks_normal
4704 && bitmap_bit_p (d->irrevocable_blocks_normal,
4705 region->entry_block->index))
4706 {
4707 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE);
4708 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4709 continue;
4710 }
4711
4712 need_ssa_rename |=
4713 ipa_tm_transform_calls (node, region, region->entry_block,
4714 d->irrevocable_blocks_normal);
4715 }
4716
4717 if (need_ssa_rename)
4718 update_ssa (TODO_update_ssa_only_virtuals);
4719
4720 pop_cfun ();
4721 current_function_decl = NULL;
4722 }
4723
4724 /* Transform the calls within the transactional clone of NODE. */
4725
4726 static void
4727 ipa_tm_transform_clone (struct cgraph_node *node)
4728 {
4729 struct tm_ipa_cg_data *d;
4730 bool need_ssa_rename;
4731
4732 d = get_cg_data (&node, true);
4733
4734 /* If this function makes no calls and has no irrevocable blocks,
4735 then there's nothing to do. */
4736 /* ??? Remove non-aborting top-level transactions. */
4737 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
4738 return;
4739
4740 current_function_decl = d->clone->symbol.decl;
4741 push_cfun (DECL_STRUCT_FUNCTION (current_function_decl));
4742 calculate_dominance_info (CDI_DOMINATORS);
4743
4744 need_ssa_rename =
4745 ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
4746 d->irrevocable_blocks_clone);
4747
4748 if (need_ssa_rename)
4749 update_ssa (TODO_update_ssa_only_virtuals);
4750
4751 pop_cfun ();
4752 current_function_decl = NULL;
4753 }
4754
4755 /* Main entry point for the transactional memory IPA pass. */
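/* In outline, the pass below proceeds as follows:

     1) Queue every local function marked tm_callable, and every function
	that contains a transaction region, scanning the calls within those
	regions onto the tm_callees worklist.
     2) Scan each queued callee as if it were its transactional clone,
	transitively queueing new callees.
     3) Iteratively find irrevocable blocks and propagate irrevocability
	and the tm_may_enter_irr bit across the callgraph until stable.
     4) Diagnose unsafe calls in transaction_safe functions and in
	atomic transactions.
     5) Create the transactional clones and redirect calls within
	transactions and clones to them, inserting calls to the
	irrevocable runtime entry point where required.  */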
4756
4757 static unsigned int
4758 ipa_tm_execute (void)
4759 {
4760 cgraph_node_queue tm_callees = NULL;
4761 /* List of functions that will go irrevocable. */
4762 cgraph_node_queue irr_worklist = NULL;
4763
4764 struct cgraph_node *node;
4765 struct tm_ipa_cg_data *d;
4766 enum availability a;
4767 unsigned int i;
4768
4769 #ifdef ENABLE_CHECKING
4770 verify_cgraph ();
4771 #endif
4772
4773 bitmap_obstack_initialize (&tm_obstack);
4774
4775 /* For all local functions marked tm_callable, queue them. */
4776 FOR_EACH_DEFINED_FUNCTION (node)
4777 if (is_tm_callable (node->symbol.decl)
4778 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
4779 {
4780 d = get_cg_data (&node, true);
4781 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
4782 }
4783
4784 /* For all local reachable functions... */
4785 FOR_EACH_DEFINED_FUNCTION (node)
4786 if (node->lowered
4787 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
4788 {
4789 /* ... marked tm_pure, record that fact for the runtime by
4790 indicating that the pure function is its own tm_callable.
4791 No need to do this if the function's address can't be taken. */
4792 if (is_tm_pure (node->symbol.decl))
4793 {
4794 if (!node->local.local)
4795 record_tm_clone_pair (node->symbol.decl, node->symbol.decl);
4796 continue;
4797 }
4798
4799 current_function_decl = node->symbol.decl;
4800 push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl));
4801 calculate_dominance_info (CDI_DOMINATORS);
4802
4803 tm_region_init (NULL);
4804 if (all_tm_regions)
4805 {
4806 d = get_cg_data (&node, true);
4807
4808 /* Scan for calls that are in each transaction. */
4809 ipa_tm_scan_calls_transaction (d, &tm_callees);
4810
4811 /* Put it in the worklist so we can scan the function
4812 later (ipa_tm_scan_irr_function) and mark the
4813 irrevocable blocks. */
4814 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
4815 d->want_irr_scan_normal = true;
4816 }
4817
4818 pop_cfun ();
4819 current_function_decl = NULL;
4820 }
4821
4822 /* For every local function on the callee list, scan as if we will be
4823 creating a transactional clone, queueing all new functions we find
4824 along the way. */
4825 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4826 {
4827 node = VEC_index (cgraph_node_p, tm_callees, i);
4828 a = cgraph_function_body_availability (node);
4829 d = get_cg_data (&node, true);
4830
4831 /* Put it in the worklist so we can scan the function later
4832 (ipa_tm_scan_irr_function) and mark the irrevocable
4833 blocks. */
4834 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
4835
4836 /* Some callees cannot be arbitrarily cloned. These will always be
4837 irrevocable. Mark these now, so that we need not scan them. */
4838 if (is_tm_irrevocable (node->symbol.decl))
4839 ipa_tm_note_irrevocable (node, &irr_worklist);
4840 else if (a <= AVAIL_NOT_AVAILABLE
4841 && !is_tm_safe_or_pure (node->symbol.decl))
4842 ipa_tm_note_irrevocable (node, &irr_worklist);
4843 else if (a >= AVAIL_OVERWRITABLE)
4844 {
4845 if (!tree_versionable_function_p (node->symbol.decl))
4846 ipa_tm_note_irrevocable (node, &irr_worklist);
4847 else if (!d->is_irrevocable)
4848 {
4849 /* If this is an alias, make sure its base is queued as well.
4850 We need not scan the callees now, as the base will do. */
4851 if (node->alias)
4852 {
4853 node = cgraph_get_node (node->thunk.alias);
4854 d = get_cg_data (&node, true);
4855 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
4856 continue;
4857 }
4858
4859 /* Add all nodes called by this function into
4860 tm_callees as well. */
4861 ipa_tm_scan_calls_clone (node, &tm_callees);
4862 }
4863 }
4864 }
4865
4866 /* Iterate scans until no more work to be done. Prefer not to use
4867 VEC_pop because the worklist tends to follow a breadth-first
4868 search of the callgraph, which should allow convergence with a
4869 minimum number of scans. But we also don't want the worklist
4870 array to grow without bound, so we shift the array up periodically. */
4871 for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
4872 {
4873 if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
4874 {
4875 VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
4876 i = 0;
4877 }
4878
4879 node = VEC_index (cgraph_node_p, irr_worklist, i);
4880 d = get_cg_data (&node, true);
4881 d->in_worklist = false;
4882
4883 if (d->want_irr_scan_normal)
4884 {
4885 d->want_irr_scan_normal = false;
4886 ipa_tm_scan_irr_function (node, false);
4887 }
4888 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
4889 ipa_tm_note_irrevocable (node, &irr_worklist);
4890 }
4891
4892 /* For every function on the callee list, collect the tm_may_enter_irr
4893 bit on the node. */
4894 VEC_truncate (cgraph_node_p, irr_worklist, 0);
4895 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4896 {
4897 node = VEC_index (cgraph_node_p, tm_callees, i);
4898 if (ipa_tm_mayenterirr_function (node))
4899 {
4900 d = get_cg_data (&node, true);
4901 gcc_assert (d->in_worklist == false);
4902 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
4903 }
4904 }
4905
4906 /* Propagate the tm_may_enter_irr bit to callers until stable. */
4907 for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
4908 {
4909 struct cgraph_node *caller;
4910 struct cgraph_edge *e;
4911 struct ipa_ref *ref;
4912 unsigned j;
4913
4914 if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
4915 {
4916 VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
4917 i = 0;
4918 }
4919
4920 node = VEC_index (cgraph_node_p, irr_worklist, i);
4921 d = get_cg_data (&node, true);
4922 d->in_worklist = false;
4923 node->local.tm_may_enter_irr = true;
4924
4925 /* Propagate back to normal callers. */
4926 for (e = node->callers; e ; e = e->next_caller)
4927 {
4928 caller = e->caller;
4929 if (!is_tm_safe_or_pure (caller->symbol.decl)
4930 && !caller->local.tm_may_enter_irr)
4931 {
4932 d = get_cg_data (&caller, true);
4933 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
4934 }
4935 }
4936
4937 /* Propagate back to referring aliases as well. */
4938 for (j = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list, j, ref); j++)
4939 {
4940 caller = cgraph (ref->referring);
4941 if (ref->use == IPA_REF_ALIAS
4942 && !caller->local.tm_may_enter_irr)
4943 {
4944 /* ?? Do not traverse aliases here. */
4945 d = get_cg_data (&caller, false);
4946 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
4947 }
4948 }
4949 }
4950
4951 /* Now validate all tm_safe functions, and all atomic regions in
4952 other functions. */
4953 FOR_EACH_DEFINED_FUNCTION (node)
4954 if (node->lowered
4955 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
4956 {
4957 d = get_cg_data (&node, true);
4958 if (is_tm_safe (node->symbol.decl))
4959 ipa_tm_diagnose_tm_safe (node);
4960 else if (d->all_tm_regions)
4961 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
4962 }
4963
4964 /* Create clones. Do those that are not irrevocable and have a
4965 positive call count. Do those publicly visible functions that
4966 the user directed us to clone. */
4967 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4968 {
4969 bool doit = false;
4970
4971 node = VEC_index (cgraph_node_p, tm_callees, i);
4972 if (node->same_body_alias)
4973 continue;
4974
4975 a = cgraph_function_body_availability (node);
4976 d = get_cg_data (&node, true);
4977
4978 if (a <= AVAIL_NOT_AVAILABLE)
4979 doit = is_tm_callable (node->symbol.decl);
4980 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->symbol.decl))
4981 doit = true;
4982 else if (!d->is_irrevocable
4983 && d->tm_callers_normal + d->tm_callers_clone > 0)
4984 doit = true;
4985
4986 if (doit)
4987 ipa_tm_create_version (node);
4988 }
4989
4990 /* Redirect calls to the new clones, and insert irrevocable marks. */
4991 for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
4992 {
4993 node = VEC_index (cgraph_node_p, tm_callees, i);
4994 if (node->analyzed)
4995 {
4996 d = get_cg_data (&node, true);
4997 if (d->clone)
4998 ipa_tm_transform_clone (node);
4999 }
5000 }
5001 FOR_EACH_DEFINED_FUNCTION (node)
5002 if (node->lowered
5003 && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
5004 {
5005 d = get_cg_data (&node, true);
5006 if (d->all_tm_regions)
5007 ipa_tm_transform_transaction (node);
5008 }
5009
5010 /* Free and clear all data structures. */
5011 VEC_free (cgraph_node_p, heap, tm_callees);
5012 VEC_free (cgraph_node_p, heap, irr_worklist);
5013 bitmap_obstack_release (&tm_obstack);
5014
5015 FOR_EACH_FUNCTION (node)
5016 node->symbol.aux = NULL;
5017
5018 #ifdef ENABLE_CHECKING
5019 verify_cgraph ();
5020 #endif
5021
5022 return 0;
5023 }
5024
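/* Pass descriptor for the transactional memory IPA pass.  */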
5025 struct simple_ipa_opt_pass pass_ipa_tm =
5026 {
5027 {
5028 SIMPLE_IPA_PASS,
5029 "tmipa", /* name */
5030 gate_tm, /* gate */
5031 ipa_tm_execute, /* execute */
5032 NULL, /* sub */
5033 NULL, /* next */
5034 0, /* static_pass_number */
5035 TV_TRANS_MEM, /* tv_id */
5036 PROP_ssa | PROP_cfg, /* properties_required */
5037 0, /* properties_provided */
5038 0, /* properties_destroyed */
5039 0, /* todo_flags_start */
5040 0, /* todo_flags_finish */
5041 },
5042 };
5043
5044 #include "gt-trans-mem.h"