1 /* Passes for transactional memory support.
2 Copyright (C) 2008-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "alias.h"
24 #include "symtab.h"
25 #include "options.h"
26 #include "tree.h"
27 #include "fold-const.h"
28 #include "predict.h"
29 #include "tm.h"
30 #include "hard-reg-set.h"
31 #include "function.h"
32 #include "dominance.h"
33 #include "cfg.h"
34 #include "basic-block.h"
35 #include "tree-ssa-alias.h"
36 #include "internal-fn.h"
37 #include "tree-eh.h"
38 #include "gimple-expr.h"
39 #include "gimple.h"
40 #include "calls.h"
41 #include "rtl.h"
42 #include "emit-rtl.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-walk.h"
47 #include "gimple-ssa.h"
48 #include "cgraph.h"
49 #include "tree-cfg.h"
50 #include "stringpool.h"
51 #include "tree-ssanames.h"
52 #include "tree-into-ssa.h"
53 #include "tree-pass.h"
54 #include "tree-inline.h"
55 #include "diagnostic-core.h"
56 #include "demangle.h"
57 #include "output.h"
58 #include "trans-mem.h"
59 #include "params.h"
60 #include "target.h"
61 #include "langhooks.h"
62 #include "gimple-pretty-print.h"
63 #include "cfgloop.h"
64 #include "tree-ssa-address.h"
65
66
67 #define A_RUNINSTRUMENTEDCODE 0x0001
68 #define A_RUNUNINSTRUMENTEDCODE 0x0002
69 #define A_SAVELIVEVARIABLES 0x0004
70 #define A_RESTORELIVEVARIABLES 0x0008
71 #define A_ABORTTRANSACTION 0x0010
72
73 #define AR_USERABORT 0x0001
74 #define AR_USERRETRY 0x0002
75 #define AR_TMCONFLICT 0x0004
76 #define AR_EXCEPTIONBLOCKABORT 0x0008
77 #define AR_OUTERABORT 0x0010
78
79 #define MODE_SERIALIRREVOCABLE 0x0000
80
81
82 /* The representation of a transaction changes several times during the
83 lowering process. In the beginning, in the front-end we have the
84 GENERIC tree TRANSACTION_EXPR. For example,
85
86 __transaction {
87 local++;
88 if (++global == 10)
89 __tm_abort;
90 }
91
92 During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
93 trivially replaced with a GIMPLE_TRANSACTION node.
94
95 During pass_lower_tm, we examine the body of transactions looking
96 for aborts. Transactions that do not contain an abort may be
97 merged into an outer transaction. We also add a TRY-FINALLY node
98 to arrange for the transaction to be committed on any exit.
99
100 [??? Think about how this arrangement affects throw-with-commit
101 and throw-with-abort operations. In this case we want the TRY to
102 handle gotos, but not to catch any exceptions because the transaction
103 will already be closed.]
104
105 GIMPLE_TRANSACTION [label=NULL] {
106 try {
107 local = local + 1;
108 t0 = global;
109 t1 = t0 + 1;
110 global = t1;
111 if (t1 == 10)
112 __builtin___tm_abort ();
113 } finally {
114 __builtin___tm_commit ();
115 }
116 }
117
118 During pass_lower_eh, we create EH regions for the transactions,
119 intermixed with the regular EH stuff. This gives us a nice persistent
120 mapping (all the way through rtl) from transactional memory operation
121 back to the transaction, which allows us to get the abnormal edges
122 correct to model transaction aborts and restarts:
123
124 GIMPLE_TRANSACTION [label=over]
125 local = local + 1;
126 t0 = global;
127 t1 = t0 + 1;
128 global = t1;
129 if (t1 == 10)
130 __builtin___tm_abort ();
131 __builtin___tm_commit ();
132 over:
133
134 This is the end of all_lowering_passes, and so is what is present
135 during the IPA passes, and through all of the optimization passes.
136
137 During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
138 functions and mark functions for cloning.
139
140 At the end of gimple optimization, before exiting SSA form,
141 pass_tm_edges replaces statements that perform transactional
142 memory operations with the appropriate TM builtins, and swap
143 out function calls with their transactional clones. At this
144 point we introduce the abnormal transaction restart edges and
145 complete lowering of the GIMPLE_TRANSACTION node.
146
147 x = __builtin___tm_start (MAY_ABORT);
148 eh_label:
149 if (x & abort_transaction)
150 goto over;
151 local = local + 1;
152 t0 = __builtin___tm_load (global);
153 t1 = t0 + 1;
154 __builtin___tm_store (&global, t1);
155 if (t1 == 10)
156 __builtin___tm_abort ();
157 __builtin___tm_commit ();
158 over:
159 */
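/* For illustration only: a minimal source-level sketch (assuming the GNU
   TM language extension enabled by -fgnu-tm; the function and variable
   names are hypothetical) that produces the trees shown above:

       int global;

       void
       f (int i)
       {
         int local = i;
         __transaction_atomic {
           local++;
           if (++global == 10)
             __transaction_cancel;
         }
       }

   The front end represents the __transaction_atomic block as a
   TRANSACTION_EXPR, which then follows the lowering steps described
   above.  */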
160
161 static void *expand_regions (struct tm_region *,
162 void *(*callback)(struct tm_region *, void *),
163 void *, bool);
164
165 \f
166 /* Return the attributes we want to examine for X, or NULL if it's not
167 something we examine. We look at function types, but also accept
168 function decls and pointers to function types, peeking through them. */
169
170 static tree
171 get_attrs_for (const_tree x)
172 {
173 if (x == NULL_TREE)
174 return NULL_TREE;
175
176 switch (TREE_CODE (x))
177 {
178 case FUNCTION_DECL:
179 return TYPE_ATTRIBUTES (TREE_TYPE (x));
180 break;
181
182 default:
183 if (TYPE_P (x))
184 return NULL_TREE;
185 x = TREE_TYPE (x);
186 if (TREE_CODE (x) != POINTER_TYPE)
187 return NULL_TREE;
188 /* FALLTHRU */
189
190 case POINTER_TYPE:
191 x = TREE_TYPE (x);
192 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
193 return NULL_TREE;
194 /* FALLTHRU */
195
196 case FUNCTION_TYPE:
197 case METHOD_TYPE:
198 return TYPE_ATTRIBUTES (x);
199 }
200 }
201
202 /* Return true if X has been marked TM_PURE. */
203
204 bool
205 is_tm_pure (const_tree x)
206 {
207 unsigned flags;
208
209 switch (TREE_CODE (x))
210 {
211 case FUNCTION_DECL:
212 case FUNCTION_TYPE:
213 case METHOD_TYPE:
214 break;
215
216 default:
217 if (TYPE_P (x))
218 return false;
219 x = TREE_TYPE (x);
220 if (TREE_CODE (x) != POINTER_TYPE)
221 return false;
222 /* FALLTHRU */
223
224 case POINTER_TYPE:
225 x = TREE_TYPE (x);
226 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
227 return false;
228 break;
229 }
230
231 flags = flags_from_decl_or_type (x);
232 return (flags & ECF_TM_PURE) != 0;
233 }
234
235 /* Return true if X has been marked TM_IRREVOCABLE. */
236
237 static bool
238 is_tm_irrevocable (tree x)
239 {
240 tree attrs = get_attrs_for (x);
241
242 if (attrs && lookup_attribute ("transaction_unsafe", attrs))
243 return true;
244
245 /* A call to the irrevocable builtin is, by definition,
246 irrevocable. */
247 if (TREE_CODE (x) == ADDR_EXPR)
248 x = TREE_OPERAND (x, 0);
249 if (TREE_CODE (x) == FUNCTION_DECL
250 && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
251 && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
252 return true;
253
254 return false;
255 }
256
257 /* Return true if X has been marked TM_SAFE. */
258
259 bool
260 is_tm_safe (const_tree x)
261 {
262 if (flag_tm)
263 {
264 tree attrs = get_attrs_for (x);
265 if (attrs)
266 {
267 if (lookup_attribute ("transaction_safe", attrs))
268 return true;
269 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
270 return true;
271 }
272 }
273 return false;
274 }
275
276 /* Return true if CALL is const, or tm_pure. */
277
278 static bool
279 is_tm_pure_call (gimple call)
280 {
281 tree fn = gimple_call_fn (call);
282
283 if (TREE_CODE (fn) == ADDR_EXPR)
284 {
285 fn = TREE_OPERAND (fn, 0);
286 gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
287 }
288 else
289 fn = TREE_TYPE (fn);
290
291 return is_tm_pure (fn);
292 }
293
294 /* Return true if X has been marked TM_CALLABLE. */
295
296 static bool
297 is_tm_callable (tree x)
298 {
299 tree attrs = get_attrs_for (x);
300 if (attrs)
301 {
302 if (lookup_attribute ("transaction_callable", attrs))
303 return true;
304 if (lookup_attribute ("transaction_safe", attrs))
305 return true;
306 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
307 return true;
308 }
309 return false;
310 }
311
312 /* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
313
314 bool
315 is_tm_may_cancel_outer (tree x)
316 {
317 tree attrs = get_attrs_for (x);
318 if (attrs)
319 return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
320 return false;
321 }
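/* For reference, a sketch of the source-level attributes the predicates
   above test for (hypothetical declarations, assuming -fgnu-tm):

       void safe_fn (void)   __attribute__ ((transaction_safe));
       void call_fn (void)   __attribute__ ((transaction_callable));
       void pure_fn (void)   __attribute__ ((transaction_pure));
       void unsafe_fn (void) __attribute__ ((transaction_unsafe));
       void cancel_fn (void) __attribute__ ((transaction_may_cancel_outer));

   is_tm_safe holds for safe_fn and cancel_fn; is_tm_callable for
   safe_fn, call_fn and cancel_fn; is_tm_pure for pure_fn; and
   is_tm_irrevocable for unsafe_fn.  */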
322
323 /* Return true for built in functions that "end" a transaction. */
324
325 bool
326 is_tm_ending_fndecl (tree fndecl)
327 {
328 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
329 switch (DECL_FUNCTION_CODE (fndecl))
330 {
331 case BUILT_IN_TM_COMMIT:
332 case BUILT_IN_TM_COMMIT_EH:
333 case BUILT_IN_TM_ABORT:
334 case BUILT_IN_TM_IRREVOCABLE:
335 return true;
336 default:
337 break;
338 }
339
340 return false;
341 }
342
343 /* Return true if STMT is a built in function call that "ends" a
344 transaction. */
345
346 bool
347 is_tm_ending (gimple stmt)
348 {
349 tree fndecl;
350
351 if (gimple_code (stmt) != GIMPLE_CALL)
352 return false;
353
354 fndecl = gimple_call_fndecl (stmt);
355 return (fndecl != NULL_TREE
356 && is_tm_ending_fndecl (fndecl));
357 }
358
359 /* Return true if STMT is a TM load. */
360
361 static bool
362 is_tm_load (gimple stmt)
363 {
364 tree fndecl;
365
366 if (gimple_code (stmt) != GIMPLE_CALL)
367 return false;
368
369 fndecl = gimple_call_fndecl (stmt);
370 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
371 && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
372 }
373
374 /* Same as above, but for simple TM loads, that is, not the
375 after-write, after-read, etc optimized variants. */
376
377 static bool
378 is_tm_simple_load (gimple stmt)
379 {
380 tree fndecl;
381
382 if (gimple_code (stmt) != GIMPLE_CALL)
383 return false;
384
385 fndecl = gimple_call_fndecl (stmt);
386 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
387 {
388 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
389 return (fcode == BUILT_IN_TM_LOAD_1
390 || fcode == BUILT_IN_TM_LOAD_2
391 || fcode == BUILT_IN_TM_LOAD_4
392 || fcode == BUILT_IN_TM_LOAD_8
393 || fcode == BUILT_IN_TM_LOAD_FLOAT
394 || fcode == BUILT_IN_TM_LOAD_DOUBLE
395 || fcode == BUILT_IN_TM_LOAD_LDOUBLE
396 || fcode == BUILT_IN_TM_LOAD_M64
397 || fcode == BUILT_IN_TM_LOAD_M128
398 || fcode == BUILT_IN_TM_LOAD_M256);
399 }
400 return false;
401 }
402
403 /* Return true if STMT is a TM store. */
404
405 static bool
406 is_tm_store (gimple stmt)
407 {
408 tree fndecl;
409
410 if (gimple_code (stmt) != GIMPLE_CALL)
411 return false;
412
413 fndecl = gimple_call_fndecl (stmt);
414 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
415 && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
416 }
417
418 /* Same as above, but for simple TM stores, that is, not the
419 after-write, after-read, etc optimized variants. */
420
421 static bool
422 is_tm_simple_store (gimple stmt)
423 {
424 tree fndecl;
425
426 if (gimple_code (stmt) != GIMPLE_CALL)
427 return false;
428
429 fndecl = gimple_call_fndecl (stmt);
430 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
431 {
432 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
433 return (fcode == BUILT_IN_TM_STORE_1
434 || fcode == BUILT_IN_TM_STORE_2
435 || fcode == BUILT_IN_TM_STORE_4
436 || fcode == BUILT_IN_TM_STORE_8
437 || fcode == BUILT_IN_TM_STORE_FLOAT
438 || fcode == BUILT_IN_TM_STORE_DOUBLE
439 || fcode == BUILT_IN_TM_STORE_LDOUBLE
440 || fcode == BUILT_IN_TM_STORE_M64
441 || fcode == BUILT_IN_TM_STORE_M128
442 || fcode == BUILT_IN_TM_STORE_M256);
443 }
444 return false;
445 }
446
447 /* Return true if FNDECL is BUILT_IN_TM_ABORT. */
448
449 static bool
450 is_tm_abort (tree fndecl)
451 {
452 return (fndecl
453 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
454 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
455 }
456
457 /* Build a GENERIC tree for a user abort. This is called by front ends
458 while transforming the __tm_abort statement. */
459
460 tree
461 build_tm_abort_call (location_t loc, bool is_outer)
462 {
463 return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
464 build_int_cst (integer_type_node,
465 AR_USERABORT
466 | (is_outer ? AR_OUTERABORT : 0)));
467 }
468 \f
469 /* Map for arbitrary function replacement under TM, as created
470 by the tm_wrap attribute. */
471
472 struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
473 {
474 static inline hashval_t hash (tree_map *m) { return m->hash; }
475 static inline bool
476 equal (tree_map *a, tree_map *b)
477 {
478 return a->base.from == b->base.from;
479 }
480
481 static int
482 keep_cache_entry (tree_map *&m)
483 {
484 return ggc_marked_p (m->base.from);
485 }
486 };
487
488 static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;
489
490 void
491 record_tm_replacement (tree from, tree to)
492 {
493 struct tree_map **slot, *h;
494
495 /* Do not inline wrapper functions that will get replaced in the TM
496 pass.
497
498 Suppose you have foo() that will get replaced into tmfoo(). Make
499 sure the inliner doesn't try to outsmart us and inline foo()
500 before we get a chance to do the TM replacement. */
501 DECL_UNINLINABLE (from) = 1;
502
503 if (tm_wrap_map == NULL)
504 tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);
505
506 h = ggc_alloc<tree_map> ();
507 h->hash = htab_hash_pointer (from);
508 h->base.from = from;
509 h->to = to;
510
511 slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
512 *slot = h;
513 }
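/* A sketch of the user-level annotation that feeds this map (the
   functions here are hypothetical; this assumes the GNU transaction_wrap
   attribute, which the comment above refers to as tm_wrap):

       void foo (int);
       void tm_foo (int) __attribute__ ((transaction_wrap (foo)));

   With a declaration along these lines, record_tm_replacement (foo,
   tm_foo) is made, and find_tm_replacement_function below redirects
   calls to foo inside transactions to tm_foo instead.  */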
514
515 /* Return a TM-aware replacement function for DECL. */
516
517 static tree
518 find_tm_replacement_function (tree fndecl)
519 {
520 if (tm_wrap_map)
521 {
522 struct tree_map *h, in;
523
524 in.base.from = fndecl;
525 in.hash = htab_hash_pointer (fndecl);
526 h = tm_wrap_map->find_with_hash (&in, in.hash);
527 if (h)
528 return h->to;
529 }
530
531 /* ??? We may well want TM versions of most of the common <string.h>
532 functions. For now, we already have these two defined. */
533 /* Adjust expand_call_tm() attributes as necessary for the cases
534 handled here: */
535 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
536 switch (DECL_FUNCTION_CODE (fndecl))
537 {
538 case BUILT_IN_MEMCPY:
539 return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
540 case BUILT_IN_MEMMOVE:
541 return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
542 case BUILT_IN_MEMSET:
543 return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
544 default:
545 return NULL;
546 }
547
548 return NULL;
549 }
550
551 /* When appropriate, record TM replacement for memory allocation functions.
552
553 FROM is the FNDECL to wrap. */
554 void
555 tm_malloc_replacement (tree from)
556 {
557 const char *str;
558 tree to;
559
560 if (TREE_CODE (from) != FUNCTION_DECL)
561 return;
562
563 /* If we have a previous replacement, the user must be explicitly
564 wrapping malloc/calloc/free. They better know what they're
565 doing... */
566 if (find_tm_replacement_function (from))
567 return;
568
569 str = IDENTIFIER_POINTER (DECL_NAME (from));
570
571 if (!strcmp (str, "malloc"))
572 to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
573 else if (!strcmp (str, "calloc"))
574 to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
575 else if (!strcmp (str, "free"))
576 to = builtin_decl_explicit (BUILT_IN_TM_FREE);
577 else
578 return;
579
580 TREE_NOTHROW (to) = 0;
581
582 record_tm_replacement (from, to);
583 }
584 \f
585 /* Diagnostics for tm_safe functions/regions. Called by the front end
586 once we've lowered the function to high-gimple. */
587
588 /* Subroutine of diagnose_tm_blocks, called through walk_gimple_seq.
589 Process exactly one statement. WI->INFO is set to non-null when in
590 the context of a tm_safe function, and null for a __transaction block. */
591
592 #define DIAG_TM_OUTER 1
593 #define DIAG_TM_SAFE 2
594 #define DIAG_TM_RELAXED 4
595
596 struct diagnose_tm
597 {
598 unsigned int summary_flags : 8;
599 unsigned int block_flags : 8;
600 unsigned int func_flags : 8;
601 unsigned int saw_volatile : 1;
602 gimple stmt;
603 };
604
605 /* Return true if T is a volatile variable of some kind. */
606
607 static bool
608 volatile_var_p (tree t)
609 {
610 return (SSA_VAR_P (t)
611 && TREE_THIS_VOLATILE (TREE_TYPE (t)));
612 }
613
614 /* Tree callback function for diagnose_tm pass. */
615
616 static tree
617 diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
618 void *data)
619 {
620 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
621 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
622
623 if (volatile_var_p (*tp)
624 && d->block_flags & DIAG_TM_SAFE
625 && !d->saw_volatile)
626 {
627 d->saw_volatile = 1;
628 error_at (gimple_location (d->stmt),
629 "invalid volatile use of %qD inside transaction",
630 *tp);
631 }
632
633 return NULL_TREE;
634 }
635
636 static inline bool
637 is_tm_safe_or_pure (const_tree x)
638 {
639 return is_tm_safe (x) || is_tm_pure (x);
640 }
641
642 static tree
643 diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
644 struct walk_stmt_info *wi)
645 {
646 gimple stmt = gsi_stmt (*gsi);
647 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
648
649 /* Save stmt for use in leaf analysis. */
650 d->stmt = stmt;
651
652 switch (gimple_code (stmt))
653 {
654 case GIMPLE_CALL:
655 {
656 tree fn = gimple_call_fn (stmt);
657
658 if ((d->summary_flags & DIAG_TM_OUTER) == 0
659 && is_tm_may_cancel_outer (fn))
660 error_at (gimple_location (stmt),
661 "%<transaction_may_cancel_outer%> function call not within"
662 " outer transaction or %<transaction_may_cancel_outer%>");
663
664 if (d->summary_flags & DIAG_TM_SAFE)
665 {
666 bool is_safe, direct_call_p;
667 tree replacement;
668
669 if (TREE_CODE (fn) == ADDR_EXPR
670 && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
671 {
672 direct_call_p = true;
673 replacement = TREE_OPERAND (fn, 0);
674 replacement = find_tm_replacement_function (replacement);
675 if (replacement)
676 fn = replacement;
677 }
678 else
679 {
680 direct_call_p = false;
681 replacement = NULL_TREE;
682 }
683
684 if (is_tm_safe_or_pure (fn))
685 is_safe = true;
686 else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
687 {
688 /* A function explicitly marked transaction_callable as
689 opposed to transaction_safe is being defined to be
690 unsafe as part of its ABI, regardless of its contents. */
691 is_safe = false;
692 }
693 else if (direct_call_p)
694 {
695 if (IS_TYPE_OR_DECL_P (fn)
696 && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
697 is_safe = true;
698 else if (replacement)
699 {
700 /* ??? At present we've been considering replacements
701 merely transaction_callable, and therefore might
702 enter irrevocable. The tm_wrap attribute has not
703 yet made it into the new language spec. */
704 is_safe = false;
705 }
706 else
707 {
708 /* ??? Diagnostics for unmarked direct calls moved into
709 the IPA pass. Section 3.2 of the spec details how
710 functions not marked should be considered "implicitly
711 safe" based on having examined the function body. */
712 is_safe = true;
713 }
714 }
715 else
716 {
717 /* An unmarked indirect call. Consider it unsafe even
718 though optimization may yet figure out how to inline. */
719 is_safe = false;
720 }
721
722 if (!is_safe)
723 {
724 if (TREE_CODE (fn) == ADDR_EXPR)
725 fn = TREE_OPERAND (fn, 0);
726 if (d->block_flags & DIAG_TM_SAFE)
727 {
728 if (direct_call_p)
729 error_at (gimple_location (stmt),
730 "unsafe function call %qD within "
731 "atomic transaction", fn);
732 else
733 {
734 if (!DECL_P (fn) || DECL_NAME (fn))
735 error_at (gimple_location (stmt),
736 "unsafe function call %qE within "
737 "atomic transaction", fn);
738 else
739 error_at (gimple_location (stmt),
740 "unsafe indirect function call within "
741 "atomic transaction");
742 }
743 }
744 else
745 {
746 if (direct_call_p)
747 error_at (gimple_location (stmt),
748 "unsafe function call %qD within "
749 "%<transaction_safe%> function", fn);
750 else
751 {
752 if (!DECL_P (fn) || DECL_NAME (fn))
753 error_at (gimple_location (stmt),
754 "unsafe function call %qE within "
755 "%<transaction_safe%> function", fn);
756 else
757 error_at (gimple_location (stmt),
758 "unsafe indirect function call within "
759 "%<transaction_safe%> function");
760 }
761 }
762 }
763 }
764 }
765 break;
766
767 case GIMPLE_ASM:
768 /* ??? We ought to come up with a way to add attributes to
769 asm statements, and then add "transaction_safe" to it.
770 Either that or get the language spec to resurrect __tm_waiver. */
771 if (d->block_flags & DIAG_TM_SAFE)
772 error_at (gimple_location (stmt),
773 "asm not allowed in atomic transaction");
774 else if (d->func_flags & DIAG_TM_SAFE)
775 error_at (gimple_location (stmt),
776 "asm not allowed in %<transaction_safe%> function");
777 break;
778
779 case GIMPLE_TRANSACTION:
780 {
781 gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
782 unsigned char inner_flags = DIAG_TM_SAFE;
783
784 if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
785 {
786 if (d->block_flags & DIAG_TM_SAFE)
787 error_at (gimple_location (stmt),
788 "relaxed transaction in atomic transaction");
789 else if (d->func_flags & DIAG_TM_SAFE)
790 error_at (gimple_location (stmt),
791 "relaxed transaction in %<transaction_safe%> function");
792 inner_flags = DIAG_TM_RELAXED;
793 }
794 else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
795 {
796 if (d->block_flags)
797 error_at (gimple_location (stmt),
798 "outer transaction in transaction");
799 else if (d->func_flags & DIAG_TM_OUTER)
800 error_at (gimple_location (stmt),
801 "outer transaction in "
802 "%<transaction_may_cancel_outer%> function");
803 else if (d->func_flags & DIAG_TM_SAFE)
804 error_at (gimple_location (stmt),
805 "outer transaction in %<transaction_safe%> function");
806 inner_flags |= DIAG_TM_OUTER;
807 }
808
809 *handled_ops_p = true;
810 if (gimple_transaction_body (trans_stmt))
811 {
812 struct walk_stmt_info wi_inner;
813 struct diagnose_tm d_inner;
814
815 memset (&d_inner, 0, sizeof (d_inner));
816 d_inner.func_flags = d->func_flags;
817 d_inner.block_flags = d->block_flags | inner_flags;
818 d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
819
820 memset (&wi_inner, 0, sizeof (wi_inner));
821 wi_inner.info = &d_inner;
822
823 walk_gimple_seq (gimple_transaction_body (trans_stmt),
824 diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
825 }
826 }
827 break;
828
829 default:
830 break;
831 }
832
833 return NULL_TREE;
834 }
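/* Two illustrative rejections from the walk above (hypothetical code,
   assuming -fgnu-tm):

       void (*callback) (void);

       void
       g (void)
       {
         __transaction_atomic {
           callback ();
           __asm__ ("");
         }
       }

   The indirect call through the unmarked pointer CALLBACK is diagnosed
   as an unsafe call within an atomic transaction, and the asm triggers
   "asm not allowed in atomic transaction".  Unmarked *direct* calls are
   deliberately not diagnosed here; as noted above, they are handled by
   the IPA pass.  */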
835
836 static unsigned int
837 diagnose_tm_blocks (void)
838 {
839 struct walk_stmt_info wi;
840 struct diagnose_tm d;
841
842 memset (&d, 0, sizeof (d));
843 if (is_tm_may_cancel_outer (current_function_decl))
844 d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
845 else if (is_tm_safe (current_function_decl))
846 d.func_flags = DIAG_TM_SAFE;
847 d.summary_flags = d.func_flags;
848
849 memset (&wi, 0, sizeof (wi));
850 wi.info = &d;
851
852 walk_gimple_seq (gimple_body (current_function_decl),
853 diagnose_tm_1, diagnose_tm_1_op, &wi);
854
855 return 0;
856 }
857
858 namespace {
859
860 const pass_data pass_data_diagnose_tm_blocks =
861 {
862 GIMPLE_PASS, /* type */
863 "*diagnose_tm_blocks", /* name */
864 OPTGROUP_NONE, /* optinfo_flags */
865 TV_TRANS_MEM, /* tv_id */
866 PROP_gimple_any, /* properties_required */
867 0, /* properties_provided */
868 0, /* properties_destroyed */
869 0, /* todo_flags_start */
870 0, /* todo_flags_finish */
871 };
872
873 class pass_diagnose_tm_blocks : public gimple_opt_pass
874 {
875 public:
876 pass_diagnose_tm_blocks (gcc::context *ctxt)
877 : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
878 {}
879
880 /* opt_pass methods: */
881 virtual bool gate (function *) { return flag_tm; }
882 virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }
883
884 }; // class pass_diagnose_tm_blocks
885
886 } // anon namespace
887
888 gimple_opt_pass *
889 make_pass_diagnose_tm_blocks (gcc::context *ctxt)
890 {
891 return new pass_diagnose_tm_blocks (ctxt);
892 }
893 \f
894 /* Instead of instrumenting thread private memory, we save the
895 addresses in a log which we later use to save/restore the values
896 at those addresses upon transaction start/restart.
897
898 The log is keyed by address, where each element contains individual
899 statements among different code paths that perform the store.
900
901 This log is later used to generate either plain save/restore of the
902 addresses upon transaction start/restart, or calls to the ITM_L*
903 logging functions.
904
905 So for something like:
906
907 struct large { int x[1000]; };
908 struct large lala = { 0 };
909 __transaction {
910 lala.x[i] = 123;
911 ...
912 }
913
914 We can either save/restore:
915
916 lala = { 0 };
917 trxn = _ITM_startTransaction ();
918 if (trxn & a_saveLiveVariables)
919 tmp_lala1 = lala.x[i];
920 else if (trxn & a_restoreLiveVariables)
921 lala.x[i] = tmp_lala1;
922
923 or use the logging functions:
924
925 lala = { 0 };
926 trxn = _ITM_startTransaction ();
927 _ITM_LU4 (&lala.x[i]);
928
929 Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
930 far up the dominator tree as possible, so that one call shadows all
931 of the writes to a given location (thus reducing the total number of
932 logging calls), but not so high as to be called on a path that does
933 not perform a write. */
934
935 /* One individual log entry. We may have multiple statements for the
936 same location if neither dominates the other (on different
937 execution paths). */
938 typedef struct tm_log_entry
939 {
940 /* Address to save. */
941 tree addr;
942 /* Entry block for the transaction this address occurs in. */
943 basic_block entry_block;
944 /* Dominating statements the store occurs in. */
945 vec<gimple> stmts;
946 /* Initially, while we are building the log, we place a nonzero
947 value here to mean that this address *will* be saved with a
948 save/restore sequence. Later, when generating the save sequence
949 we place the SSA temp generated here. */
950 tree save_var;
951 } *tm_log_entry_t;
952
953
954 /* Log entry hashtable helpers. */
955
956 struct log_entry_hasher : pointer_hash <tm_log_entry>
957 {
958 static inline hashval_t hash (const tm_log_entry *);
959 static inline bool equal (const tm_log_entry *, const tm_log_entry *);
960 static inline void remove (tm_log_entry *);
961 };
962
963 /* Htab support. Return hash value for a `tm_log_entry'. */
964 inline hashval_t
965 log_entry_hasher::hash (const tm_log_entry *log)
966 {
967 return iterative_hash_expr (log->addr, 0);
968 }
969
970 /* Htab support. Return true if two log entries are the same. */
971 inline bool
972 log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
973 {
974 /* FIXME:
975
976 rth: I suggest that we get rid of the component refs etc.
977 I.e. resolve the reference to base + offset.
978
979 We may need to actually finish a merge with mainline for this,
980 since we'd like to be presented with Richi's MEM_REF_EXPRs more
981 often than not. But in the meantime your tm_log_entry could save
982 the results of get_inner_reference.
983
984 See: g++.dg/tm/pr46653.C
985 */
986
987 /* Special case plain equality because operand_equal_p() below will
988 return FALSE if the addresses are equal but they have
989 side-effects (e.g. a volatile address). */
990 if (log1->addr == log2->addr)
991 return true;
992
993 return operand_equal_p (log1->addr, log2->addr, 0);
994 }
995
996 /* Htab support. Free one tm_log_entry. */
997 inline void
998 log_entry_hasher::remove (tm_log_entry *lp)
999 {
1000 lp->stmts.release ();
1001 free (lp);
1002 }
1003
1004
1005 /* The actual log. */
1006 static hash_table<log_entry_hasher> *tm_log;
1007
1008 /* Addresses to log with a save/restore sequence. These should be in
1009 dominator order. */
1010 static vec<tree> tm_log_save_addresses;
1011
1012 enum thread_memory_type
1013 {
1014 mem_non_local = 0,
1015 mem_thread_local,
1016 mem_transaction_local,
1017 mem_max
1018 };
1019
1020 typedef struct tm_new_mem_map
1021 {
1022 /* SSA_NAME being dereferenced. */
1023 tree val;
1024 enum thread_memory_type local_new_memory;
1025 } tm_new_mem_map_t;
1026
1027 /* Hashtable helpers. */
1028
1029 struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map_t>
1030 {
1031 static inline hashval_t hash (const tm_new_mem_map_t *);
1032 static inline bool equal (const tm_new_mem_map_t *, const tm_new_mem_map_t *);
1033 };
1034
1035 inline hashval_t
1036 tm_mem_map_hasher::hash (const tm_new_mem_map_t *v)
1037 {
1038 return (intptr_t)v->val >> 4;
1039 }
1040
1041 inline bool
1042 tm_mem_map_hasher::equal (const tm_new_mem_map_t *v, const tm_new_mem_map_t *c)
1043 {
1044 return v->val == c->val;
1045 }
1046
1047 /* Map for an SSA_NAME originally pointing to a non aliased new piece
1048 of memory (malloc, alloca, etc). */
1049 static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;
1050
1051 /* Initialize logging data structures. */
1052 static void
1053 tm_log_init (void)
1054 {
1055 tm_log = new hash_table<log_entry_hasher> (10);
1056 tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
1057 tm_log_save_addresses.create (5);
1058 }
1059
1060 /* Free logging data structures. */
1061 static void
1062 tm_log_delete (void)
1063 {
1064 delete tm_log;
1065 tm_log = NULL;
1066 delete tm_new_mem_hash;
1067 tm_new_mem_hash = NULL;
1068 tm_log_save_addresses.release ();
1069 }
1070
1071 /* Return true if MEM is a transaction invariant memory for the TM
1072 region starting at REGION_ENTRY_BLOCK. */
1073 static bool
1074 transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
1075 {
1076 if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
1077 && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
1078 {
1079 basic_block def_bb;
1080
1081 def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
1082 return def_bb != region_entry_block
1083 && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
1084 }
1085
1086 mem = strip_invariant_refs (mem);
1087 return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
1088 }
1089
1090 /* Given an address ADDR in STMT, find it in the memory log or add it,
1091 making sure to keep only the addresses highest in the dominator
1092 tree.
1093
1094 ENTRY_BLOCK is the entry_block for the transaction.
1095
1096 If we find the address in the log, make sure it's either the same
1097 address, or an equivalent one that dominates ADDR.
1098
1099 If we find the address, but neither ADDR dominates the found
1100 address, nor the found one dominates ADDR, we're on different
1101 execution paths. Add it.
1102
1103 If known, ENTRY_BLOCK is the entry block for the region, otherwise
1104 NULL. */
1105 static void
1106 tm_log_add (basic_block entry_block, tree addr, gimple stmt)
1107 {
1108 tm_log_entry **slot;
1109 struct tm_log_entry l, *lp;
1110
1111 l.addr = addr;
1112 slot = tm_log->find_slot (&l, INSERT);
1113 if (!*slot)
1114 {
1115 tree type = TREE_TYPE (addr);
1116
1117 lp = XNEW (struct tm_log_entry);
1118 lp->addr = addr;
1119 *slot = lp;
1120
1121 /* Small invariant addresses can be handled as save/restores. */
1122 if (entry_block
1123 && transaction_invariant_address_p (lp->addr, entry_block)
1124 && TYPE_SIZE_UNIT (type) != NULL
1125 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
1126 && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
1127 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
1128 /* We must be able to copy this type normally. I.e., no
1129 special constructors and the like. */
1130 && !TREE_ADDRESSABLE (type))
1131 {
1132 lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
1133 lp->stmts.create (0);
1134 lp->entry_block = entry_block;
1135 /* Save addresses separately in dominator order so we don't
1136 get confused by overlapping addresses in the save/restore
1137 sequence. */
1138 tm_log_save_addresses.safe_push (lp->addr);
1139 }
1140 else
1141 {
1142 /* Use the logging functions. */
1143 lp->stmts.create (5);
1144 lp->stmts.quick_push (stmt);
1145 lp->save_var = NULL;
1146 }
1147 }
1148 else
1149 {
1150 size_t i;
1151 gimple oldstmt;
1152
1153 lp = *slot;
1154
1155 /* If we're generating a save/restore sequence, we don't care
1156 about statements. */
1157 if (lp->save_var)
1158 return;
1159
1160 for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
1161 {
1162 if (stmt == oldstmt)
1163 return;
1164 /* We already have a store to the same address, higher up the
1165 dominator tree. Nothing to do. */
1166 if (dominated_by_p (CDI_DOMINATORS,
1167 gimple_bb (stmt), gimple_bb (oldstmt)))
1168 return;
1169 /* We should be processing blocks in dominator tree order. */
1170 gcc_assert (!dominated_by_p (CDI_DOMINATORS,
1171 gimple_bb (oldstmt), gimple_bb (stmt)));
1172 }
1173 /* Store is on a different code path. */
1174 lp->stmts.safe_push (stmt);
1175 }
1176 }
1177
1178 /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1179 result, insert the new statements before GSI. */
1180
1181 static tree
1182 gimplify_addr (gimple_stmt_iterator *gsi, tree x)
1183 {
1184 if (TREE_CODE (x) == TARGET_MEM_REF)
1185 x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
1186 else
1187 x = build_fold_addr_expr (x);
1188 return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
1189 }
1190
1191 /* Instrument one address with the logging functions.
1192 ADDR is the address to save.
1193 STMT is the statement before which to place it. */
1194 static void
1195 tm_log_emit_stmt (tree addr, gimple stmt)
1196 {
1197 tree type = TREE_TYPE (addr);
1198 tree size = TYPE_SIZE_UNIT (type);
1199 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1200 gimple log;
1201 enum built_in_function code = BUILT_IN_TM_LOG;
1202
1203 if (type == float_type_node)
1204 code = BUILT_IN_TM_LOG_FLOAT;
1205 else if (type == double_type_node)
1206 code = BUILT_IN_TM_LOG_DOUBLE;
1207 else if (type == long_double_type_node)
1208 code = BUILT_IN_TM_LOG_LDOUBLE;
1209 else if (tree_fits_uhwi_p (size))
1210 {
1211 unsigned int n = tree_to_uhwi (size);
1212 switch (n)
1213 {
1214 case 1:
1215 code = BUILT_IN_TM_LOG_1;
1216 break;
1217 case 2:
1218 code = BUILT_IN_TM_LOG_2;
1219 break;
1220 case 4:
1221 code = BUILT_IN_TM_LOG_4;
1222 break;
1223 case 8:
1224 code = BUILT_IN_TM_LOG_8;
1225 break;
1226 default:
1227 code = BUILT_IN_TM_LOG;
1228 if (TREE_CODE (type) == VECTOR_TYPE)
1229 {
1230 if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
1231 code = BUILT_IN_TM_LOG_M64;
1232 else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
1233 code = BUILT_IN_TM_LOG_M128;
1234 else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
1235 code = BUILT_IN_TM_LOG_M256;
1236 }
1237 break;
1238 }
1239 }
1240
1241 addr = gimplify_addr (&gsi, addr);
1242 if (code == BUILT_IN_TM_LOG)
1243 log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
1244 else
1245 log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
1246 gsi_insert_before (&gsi, log, GSI_SAME_STMT);
1247 }
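/* For the example in the overview comment above, logging the 4-byte
   element lala.x[i] selects BUILT_IN_TM_LOG_4 and the emitted call is
   the one-argument form, i.e. the _ITM_LU4 entry point shown there:

       _ITM_LU4 (&lala.x[i]);

   A type whose size matches none of the cases above falls back to the
   generic BUILT_IN_TM_LOG, which also passes TYPE_SIZE_UNIT as a second
   argument.  */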
1248
1249 /* Go through the log and instrument address that must be instrumented
1250 with the logging functions. Leave the save/restore addresses for
1251 later. */
1252 static void
1253 tm_log_emit (void)
1254 {
1255 hash_table<log_entry_hasher>::iterator hi;
1256 struct tm_log_entry *lp;
1257
1258 FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
1259 {
1260 size_t i;
1261 gimple stmt;
1262
1263 if (dump_file)
1264 {
1265 fprintf (dump_file, "TM thread private mem logging: ");
1266 print_generic_expr (dump_file, lp->addr, 0);
1267 fprintf (dump_file, "\n");
1268 }
1269
1270 if (lp->save_var)
1271 {
1272 if (dump_file)
1273 fprintf (dump_file, "DUMPING to variable\n");
1274 continue;
1275 }
1276 else
1277 {
1278 if (dump_file)
1279 fprintf (dump_file, "DUMPING with logging functions\n");
1280 for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
1281 tm_log_emit_stmt (lp->addr, stmt);
1282 }
1283 }
1284 }
1285
1286 /* Emit the save sequence for the corresponding addresses in the log.
1287 ENTRY_BLOCK is the entry block for the transaction.
1288 BB is the basic block to insert the code in. */
1289 static void
1290 tm_log_emit_saves (basic_block entry_block, basic_block bb)
1291 {
1292 size_t i;
1293 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1294 gimple stmt;
1295 struct tm_log_entry l, *lp;
1296
1297 for (i = 0; i < tm_log_save_addresses.length (); ++i)
1298 {
1299 l.addr = tm_log_save_addresses[i];
1300 lp = *(tm_log->find_slot (&l, NO_INSERT));
1301 gcc_assert (lp->save_var != NULL);
1302
1303 /* We only care about variables in the current transaction. */
1304 if (lp->entry_block != entry_block)
1305 continue;
1306
1307 stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
1308
1309 /* Make sure we can create an SSA_NAME for this type. For
1310 instance, aggregates aren't allowed, in which case the system
1311 will create a VOP for us and everything will just work. */
1312 if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
1313 {
1314 lp->save_var = make_ssa_name (lp->save_var, stmt);
1315 gimple_assign_set_lhs (stmt, lp->save_var);
1316 }
1317
1318 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1319 }
1320 }
1321
1322 /* Emit the restore sequence for the corresponding addresses in the log.
1323 ENTRY_BLOCK is the entry block for the transaction.
1324 BB is the basic block to insert the code in. */
1325 static void
1326 tm_log_emit_restores (basic_block entry_block, basic_block bb)
1327 {
1328 int i;
1329 struct tm_log_entry l, *lp;
1330 gimple_stmt_iterator gsi;
1331 gimple stmt;
1332
1333 for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
1334 {
1335 l.addr = tm_log_save_addresses[i];
1336 lp = *(tm_log->find_slot (&l, NO_INSERT));
1337 gcc_assert (lp->save_var != NULL);
1338
1339 /* We only care about variables in the current transaction. */
1340 if (lp->entry_block != entry_block)
1341 continue;
1342
1343 /* Restores are in LIFO order from the saves in case we have
1344 overlaps. */
1345 gsi = gsi_start_bb (bb);
1346
1347 stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
1348 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1349 }
1350 }
1351
1352 \f
1353 static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
1354 struct walk_stmt_info *);
1355 static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
1356 struct walk_stmt_info *);
1357
1358 /* Evaluate an address X being dereferenced and determine if it
1359 originally points to a non aliased new chunk of memory (malloc,
1360 alloca, etc).
1361
1362 Return MEM_THREAD_LOCAL if it points to a thread-local address.
1363 Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
1364 Return MEM_NON_LOCAL otherwise.
1365
1366 ENTRY_BLOCK is the entry block to the transaction containing the
1367 dereference of X. */
1368 static enum thread_memory_type
1369 thread_private_new_memory (basic_block entry_block, tree x)
1370 {
1371 gimple stmt = NULL;
1372 enum tree_code code;
1373 tm_new_mem_map_t **slot;
1374 tm_new_mem_map_t elt, *elt_p;
1375 tree val = x;
1376 enum thread_memory_type retval = mem_transaction_local;
1377
1378 if (!entry_block
1379 || TREE_CODE (x) != SSA_NAME
1380 /* Possible uninitialized use, or a function argument. In
1381 either case, we don't care. */
1382 || SSA_NAME_IS_DEFAULT_DEF (x))
1383 return mem_non_local;
1384
1385 /* Look in cache first. */
1386 elt.val = x;
1387 slot = tm_new_mem_hash->find_slot (&elt, INSERT);
1388 elt_p = *slot;
1389 if (elt_p)
1390 return elt_p->local_new_memory;
1391
1392 /* Optimistically assume the memory is transaction local during
1393 processing. This catches recursion into this variable. */
1394 *slot = elt_p = XNEW (tm_new_mem_map_t);
1395 elt_p->val = val;
1396 elt_p->local_new_memory = mem_transaction_local;
1397
1398 /* Search DEF chain to find the original definition of this address. */
1399 do
1400 {
1401 if (ptr_deref_may_alias_global_p (x))
1402 {
1403 /* Address escapes. This is not thread-private. */
1404 retval = mem_non_local;
1405 goto new_memory_ret;
1406 }
1407
1408 stmt = SSA_NAME_DEF_STMT (x);
1409
1410 /* If the malloc call is outside the transaction, this is
1411 thread-local. */
1412 if (retval != mem_thread_local
1413 && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
1414 retval = mem_thread_local;
1415
1416 if (is_gimple_assign (stmt))
1417 {
1418 code = gimple_assign_rhs_code (stmt);
1419 /* x = foo ==> foo */
1420 if (code == SSA_NAME)
1421 x = gimple_assign_rhs1 (stmt);
1422 /* x = foo + n ==> foo */
1423 else if (code == POINTER_PLUS_EXPR)
1424 x = gimple_assign_rhs1 (stmt);
1425 /* x = (cast*) foo ==> foo */
1426 else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
1427 x = gimple_assign_rhs1 (stmt);
1428 /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
1429 else if (code == COND_EXPR)
1430 {
1431 tree op1 = gimple_assign_rhs2 (stmt);
1432 tree op2 = gimple_assign_rhs3 (stmt);
1433 enum thread_memory_type mem;
1434 retval = thread_private_new_memory (entry_block, op1);
1435 if (retval == mem_non_local)
1436 goto new_memory_ret;
1437 mem = thread_private_new_memory (entry_block, op2);
1438 retval = MIN (retval, mem);
1439 goto new_memory_ret;
1440 }
1441 else
1442 {
1443 retval = mem_non_local;
1444 goto new_memory_ret;
1445 }
1446 }
1447 else
1448 {
1449 if (gimple_code (stmt) == GIMPLE_PHI)
1450 {
1451 unsigned int i;
1452 enum thread_memory_type mem;
1453 tree phi_result = gimple_phi_result (stmt);
1454
1455 /* If any of the ancestors are non-local, we are sure to
1456 be non-local. Otherwise we can avoid doing anything
1457 and inherit what has already been generated. */
1458 retval = mem_max;
1459 for (i = 0; i < gimple_phi_num_args (stmt); ++i)
1460 {
1461 tree op = PHI_ARG_DEF (stmt, i);
1462
1463 /* Exclude self-assignment. */
1464 if (phi_result == op)
1465 continue;
1466
1467 mem = thread_private_new_memory (entry_block, op);
1468 if (mem == mem_non_local)
1469 {
1470 retval = mem;
1471 goto new_memory_ret;
1472 }
1473 retval = MIN (retval, mem);
1474 }
1475 goto new_memory_ret;
1476 }
1477 break;
1478 }
1479 }
1480 while (TREE_CODE (x) == SSA_NAME);
1481
1482 if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
1483 /* Thread-local or transaction-local. */
1484 ;
1485 else
1486 retval = mem_non_local;
1487
1488 new_memory_ret:
1489 elt_p->local_new_memory = retval;
1490 return retval;
1491 }
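/* An illustrative example (hypothetical code, assuming -fgnu-tm):

       void
       f (void)
       {
         int *p = malloc (sizeof *p);
         __transaction_atomic {
           int *q = malloc (sizeof *q);
           *p = 1;
           *q = 2;
         }
       }

   Assuming neither pointer escapes, *q is mem_transaction_local (the
   malloc is inside the transaction, so a restart simply reallocates it),
   while *p is mem_thread_local (allocated before the transaction), and
   requires_barrier below logs it instead of emitting a TM barrier.  */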
1492
1493 /* Determine whether X has to be instrumented using a read
1494 or write barrier.
1495
1496 ENTRY_BLOCK is the entry block for the region in which STMT
1497 resides, or NULL if unknown.
1498
1499 STMT is the statement in which X occurs. It is used for thread
1500 private memory instrumentation. If no TPM instrumentation is
1501 desired, STMT should be null. */
1502 static bool
1503 requires_barrier (basic_block entry_block, tree x, gimple stmt)
1504 {
1505 tree orig = x;
1506 while (handled_component_p (x))
1507 x = TREE_OPERAND (x, 0);
1508
1509 switch (TREE_CODE (x))
1510 {
1511 case INDIRECT_REF:
1512 case MEM_REF:
1513 {
1514 enum thread_memory_type ret;
1515
1516 ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
1517 if (ret == mem_non_local)
1518 return true;
1519 if (stmt && ret == mem_thread_local)
1520 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1521 tm_log_add (entry_block, orig, stmt);
1522
1523 /* Transaction-locals require nothing at all. For malloc, a
1524 transaction restart frees the memory and we reallocate.
1525 For alloca, the stack pointer gets reset by the retry and
1526 we reallocate. */
1527 return false;
1528 }
1529
1530 case TARGET_MEM_REF:
1531 if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
1532 return true;
1533 x = TREE_OPERAND (TMR_BASE (x), 0);
1534 if (TREE_CODE (x) == PARM_DECL)
1535 return false;
1536 gcc_assert (TREE_CODE (x) == VAR_DECL);
1537 /* FALLTHRU */
1538
1539 case PARM_DECL:
1540 case RESULT_DECL:
1541 case VAR_DECL:
1542 if (DECL_BY_REFERENCE (x))
1543 {
1544 /* ??? This value is a pointer, but aggregate_value_p has been
1545 jigged to return true which confuses needs_to_live_in_memory.
1546 This ought to be cleaned up generically.
1547
1548 FIXME: Verify this still happens after the next mainline
1549 merge. Testcase is g++.dg/tm/pr47554.C.
1550 */
1551 return false;
1552 }
1553
1554 if (is_global_var (x))
1555 return !TREE_READONLY (x);
1556 if (/* FIXME: This condition should actually go below in the
1557 tm_log_add() call, however is_call_clobbered() depends on
1558 aliasing info which is not available during
1559 gimplification. Since requires_barrier() gets called
1560 during lower_sequence_tm/gimplification, leave the call
1561 to needs_to_live_in_memory until we eliminate
1562 lower_sequence_tm altogether. */
1563 needs_to_live_in_memory (x))
1564 return true;
1565 else
1566 {
1567 /* For local memory that doesn't escape (aka thread private
1568 memory), we can either save the value at the beginning of
1569 the transaction and restore on restart, or call a tm
1570 function to dynamically save and restore on restart
1571 (ITM_L*). */
1572 if (stmt)
1573 tm_log_add (entry_block, orig, stmt);
1574 return false;
1575 }
1576
1577 default:
1578 return false;
1579 }
1580 }
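/* For instance (hypothetical declarations), given

       int counter;
       const int limit = 100;

   a store to COUNTER inside a transaction requires a write barrier
   (is_global_var and not TREE_READONLY), a load of LIMIT needs no
   barrier at all since the global is read-only, and accesses through
   pointers are classified by thread_private_new_memory above.  */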
1581
1582 /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1583 a transaction region. */
1584
1585 static void
1586 examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
1587 {
1588 gimple stmt = gsi_stmt (*gsi);
1589
1590 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
1591 *state |= GTMA_HAVE_LOAD;
1592 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
1593 *state |= GTMA_HAVE_STORE;
1594 }
1595
1596 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1597
1598 static void
1599 examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
1600 {
1601 gimple stmt = gsi_stmt (*gsi);
1602 tree fn;
1603
1604 if (is_tm_pure_call (stmt))
1605 return;
1606
1607 /* Check if this call is a transaction abort. */
1608 fn = gimple_call_fndecl (stmt);
1609 if (is_tm_abort (fn))
1610 *state |= GTMA_HAVE_ABORT;
1611
1612 /* Note that something may happen. */
1613 *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
1614 }
1615
1616 /* Lower a GIMPLE_TRANSACTION statement. */
1617
1618 static void
1619 lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
1620 {
1621 gimple g;
1622 gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
1623 unsigned int *outer_state = (unsigned int *) wi->info;
1624 unsigned int this_state = 0;
1625 struct walk_stmt_info this_wi;
1626
1627 /* First, lower the body. The scanning that we do inside gives
1628 us some idea of what we're dealing with. */
1629 memset (&this_wi, 0, sizeof (this_wi));
1630 this_wi.info = (void *) &this_state;
1631 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
1632 lower_sequence_tm, NULL, &this_wi);
1633
1634 /* If there was absolutely nothing transaction related inside the
1635 transaction, we may elide it. Likewise if this is a nested
1636 transaction and does not contain an abort. */
1637 if (this_state == 0
1638 || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
1639 {
1640 if (outer_state)
1641 *outer_state |= this_state;
1642
1643 gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
1644 GSI_SAME_STMT);
1645 gimple_transaction_set_body (stmt, NULL);
1646
1647 gsi_remove (gsi, true);
1648 wi->removed_stmt = true;
1649 return;
1650 }
1651
1652 /* Wrap the body of the transaction in a try-finally node so that
1653 the commit call is always properly called. */
1654 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
1655 if (flag_exceptions)
1656 {
1657 tree ptr;
1658 gimple_seq n_seq, e_seq;
1659
1660 n_seq = gimple_seq_alloc_with_stmt (g);
1661 e_seq = NULL;
1662
1663 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1664 1, integer_zero_node);
1665 ptr = create_tmp_var (ptr_type_node);
1666 gimple_call_set_lhs (g, ptr);
1667 gimple_seq_add_stmt (&e_seq, g);
1668
1669 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
1670 1, ptr);
1671 gimple_seq_add_stmt (&e_seq, g);
1672
1673 g = gimple_build_eh_else (n_seq, e_seq);
1674 }
1675
1676 g = gimple_build_try (gimple_transaction_body (stmt),
1677 gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
1678 gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
1679
1680 gimple_transaction_set_body (stmt, NULL);
1681
1682 /* If the transaction calls abort or if this is an outer transaction,
1683 add an "over" label afterwards. */
1684 if ((this_state & (GTMA_HAVE_ABORT))
1685 || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
1686 {
1687 tree label = create_artificial_label (UNKNOWN_LOCATION);
1688 gimple_transaction_set_label (stmt, label);
1689 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
1690 }
1691
1692 /* Record the set of operations found for use later. */
1693 this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
1694 gimple_transaction_set_subcode (stmt, this_state);
1695 }
1696
1697 /* Iterate through the statements in the sequence, lowering them all
1698 as appropriate for being in a transaction. */
1699
1700 static tree
1701 lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1702 struct walk_stmt_info *wi)
1703 {
1704 unsigned int *state = (unsigned int *) wi->info;
1705 gimple stmt = gsi_stmt (*gsi);
1706
1707 *handled_ops_p = true;
1708 switch (gimple_code (stmt))
1709 {
1710 case GIMPLE_ASSIGN:
1711 /* Only memory reads/writes need to be instrumented. */
1712 if (gimple_assign_single_p (stmt))
1713 examine_assign_tm (state, gsi);
1714 break;
1715
1716 case GIMPLE_CALL:
1717 examine_call_tm (state, gsi);
1718 break;
1719
1720 case GIMPLE_ASM:
1721 *state |= GTMA_MAY_ENTER_IRREVOCABLE;
1722 break;
1723
1724 case GIMPLE_TRANSACTION:
1725 lower_transaction (gsi, wi);
1726 break;
1727
1728 default:
1729 *handled_ops_p = !gimple_has_substatements (stmt);
1730 break;
1731 }
1732
1733 return NULL_TREE;
1734 }
1735
1736 /* Iterate through the statements in the sequence, lowering them all
1737 as appropriate for being outside of a transaction. */
1738
1739 static tree
1740 lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1741 struct walk_stmt_info * wi)
1742 {
1743 gimple stmt = gsi_stmt (*gsi);
1744
1745 if (gimple_code (stmt) == GIMPLE_TRANSACTION)
1746 {
1747 *handled_ops_p = true;
1748 lower_transaction (gsi, wi);
1749 }
1750 else
1751 *handled_ops_p = !gimple_has_substatements (stmt);
1752
1753 return NULL_TREE;
1754 }
1755
1756 /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1757 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1758 been moved out, and all the data required for constructing a proper
1759 CFG has been recorded. */
1760
1761 static unsigned int
1762 execute_lower_tm (void)
1763 {
1764 struct walk_stmt_info wi;
1765 gimple_seq body;
1766
1767 /* Transactional clones aren't created until a later pass. */
1768 gcc_assert (!decl_is_tm_clone (current_function_decl));
1769
1770 body = gimple_body (current_function_decl);
1771 memset (&wi, 0, sizeof (wi));
1772 walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
1773 gimple_set_body (current_function_decl, body);
1774
1775 return 0;
1776 }
1777
1778 namespace {
1779
1780 const pass_data pass_data_lower_tm =
1781 {
1782 GIMPLE_PASS, /* type */
1783 "tmlower", /* name */
1784 OPTGROUP_NONE, /* optinfo_flags */
1785 TV_TRANS_MEM, /* tv_id */
1786 PROP_gimple_lcf, /* properties_required */
1787 0, /* properties_provided */
1788 0, /* properties_destroyed */
1789 0, /* todo_flags_start */
1790 0, /* todo_flags_finish */
1791 };
1792
1793 class pass_lower_tm : public gimple_opt_pass
1794 {
1795 public:
1796 pass_lower_tm (gcc::context *ctxt)
1797 : gimple_opt_pass (pass_data_lower_tm, ctxt)
1798 {}
1799
1800 /* opt_pass methods: */
1801 virtual bool gate (function *) { return flag_tm; }
1802 virtual unsigned int execute (function *) { return execute_lower_tm (); }
1803
1804 }; // class pass_lower_tm
1805
1806 } // anon namespace
1807
1808 gimple_opt_pass *
1809 make_pass_lower_tm (gcc::context *ctxt)
1810 {
1811 return new pass_lower_tm (ctxt);
1812 }
1813 \f
1814 /* Collect region information for each transaction. */
1815
1816 struct tm_region
1817 {
1818 public:
1819
1820 /* The field "transaction_stmt" is initially a gtransaction *,
1821 but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).
1822
1823 Helper method to get it as a gtransaction *, with code-checking
1824 in a checked-build. */
1825
1826 gtransaction *
1827 get_transaction_stmt () const
1828 {
1829 return as_a <gtransaction *> (transaction_stmt);
1830 }
1831
1832 public:
1833
1834 /* Link to the next unnested transaction. */
1835 struct tm_region *next;
1836
1837 /* Link to the next inner transaction. */
1838 struct tm_region *inner;
1839
1840 /* Link to the next outer transaction. */
1841 struct tm_region *outer;
1842
1843 /* The GIMPLE_TRANSACTION statement beginning this transaction.
1844 After TM_MARK, this gets replaced by a call to
1845 BUILT_IN_TM_START.
1846 Hence this will be either a gtransaction * or a gcall *. */
1847 gimple transaction_stmt;
1848
1849 /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
1850 BUILT_IN_TM_START, this field is true if the transaction is an
1851 outer transaction. */
1852 bool original_transaction_was_outer;
1853
1854 /* Return value from BUILT_IN_TM_START. */
1855 tree tm_state;
1856
1857 /* The entry block to this region. This will always be the first
1858 block of the body of the transaction. */
1859 basic_block entry_block;
1860
1861 /* The first block after an expanded call to _ITM_beginTransaction. */
1862 basic_block restart_block;
1863
1864 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1865 These blocks are still a part of the region (i.e., the border is
1866 inclusive). Note that this set is only complete for paths in the CFG
1867 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1868 the edge to the "over" label. */
1869 bitmap exit_blocks;
1870
1871 /* The set of all blocks that have a TM_IRREVOCABLE call. */
1872 bitmap irr_blocks;
1873 };
1874
1875 typedef struct tm_region *tm_region_p;
1876
1877 /* True if there are pending edge statements to be committed for the
1878 current function being scanned in the tmmark pass. */
1879 bool pending_edge_inserts_p;
1880
1881 static struct tm_region *all_tm_regions;
1882 static bitmap_obstack tm_obstack;
1883
1884
1885 /* A subroutine of tm_region_init. Record the existence of the
1886 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1887
1888 static struct tm_region *
1889 tm_region_init_0 (struct tm_region *outer, basic_block bb,
1890 gtransaction *stmt)
1891 {
1892 struct tm_region *region;
1893
1894 region = (struct tm_region *)
1895 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1896
1897 if (outer)
1898 {
1899 region->next = outer->inner;
1900 outer->inner = region;
1901 }
1902 else
1903 {
1904 region->next = all_tm_regions;
1905 all_tm_regions = region;
1906 }
1907 region->inner = NULL;
1908 region->outer = outer;
1909
1910 region->transaction_stmt = stmt;
1911 region->original_transaction_was_outer = false;
1912 region->tm_state = NULL;
1913
1914 /* There are either one or two edges out of the block containing
1915 the GIMPLE_TRANSACTION, one to the actual region and one to the
1916 "over" label if the region contains an abort. The former will
1917 always be the one marked FALLTHRU. */
1918 region->entry_block = FALLTHRU_EDGE (bb)->dest;
1919
1920 region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
1921 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1922
1923 return region;
1924 }
1925
1926 /* A subroutine of tm_region_init. Record all the exit and
1927 irrevocable blocks in BB into the region's exit_blocks and
1928 irr_blocks bitmaps. Returns the new region being scanned. */
1929
1930 static struct tm_region *
1931 tm_region_init_1 (struct tm_region *region, basic_block bb)
1932 {
1933 gimple_stmt_iterator gsi;
1934 gimple g;
1935
1936 if (!region
1937 || (!region->irr_blocks && !region->exit_blocks))
1938 return region;
1939
1940 /* Check to see if this is the end of a region by seeing if it
1941 contains a call to __builtin_tm_commit{,_eh}. Note that the
1942 outermost region for DECL_IS_TM_CLONE need not collect this. */
1943 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1944 {
1945 g = gsi_stmt (gsi);
1946 if (gimple_code (g) == GIMPLE_CALL)
1947 {
1948 tree fn = gimple_call_fndecl (g);
1949 if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
1950 {
1951 if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
1952 || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
1953 && region->exit_blocks)
1954 {
1955 bitmap_set_bit (region->exit_blocks, bb->index);
1956 region = region->outer;
1957 break;
1958 }
1959 if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
1960 bitmap_set_bit (region->irr_blocks, bb->index);
1961 }
1962 }
1963 }
1964 return region;
1965 }
1966
1967 /* Collect all of the transaction regions within the current function
1968 and record them in ALL_TM_REGIONS. The REGION parameter may specify
1969 an "outermost" region for use by tm clones. */
1970
1971 static void
1972 tm_region_init (struct tm_region *region)
1973 {
1974 gimple g;
1975 edge_iterator ei;
1976 edge e;
1977 basic_block bb;
1978 auto_vec<basic_block> queue;
1979 bitmap visited_blocks = BITMAP_ALLOC (NULL);
1980 struct tm_region *old_region;
1981 auto_vec<tm_region_p> bb_regions;
1982
1983 all_tm_regions = region;
1984 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
1985
1986 /* We could store this information in bb->aux, but we may get called
1987 through get_all_tm_blocks() from another pass that may already be
1988 using bb->aux. */
1989 bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
1990
1991 queue.safe_push (bb);
1992 bb_regions[bb->index] = region;
1993 do
1994 {
1995 bb = queue.pop ();
1996 region = bb_regions[bb->index];
1997 bb_regions[bb->index] = NULL;
1998
1999 /* Record exit and irrevocable blocks. */
2000 region = tm_region_init_1 (region, bb);
2001
2002 /* Check whether the last statement in the block begins a new region. */
2003 g = last_stmt (bb);
2004 old_region = region;
2005 if (g)
2006 if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
2007 region = tm_region_init_0 (region, bb, trans_stmt);
2008
2009 /* Process subsequent blocks. */
2010 FOR_EACH_EDGE (e, ei, bb->succs)
2011 if (!bitmap_bit_p (visited_blocks, e->dest->index))
2012 {
2013 bitmap_set_bit (visited_blocks, e->dest->index);
2014 queue.safe_push (e->dest);
2015
2016 /* If the current block started a new region, make sure that only
2017 the entry block of the new region is associated with this region.
2018 Other successors are still part of the old region. */
2019 if (old_region != region && e->dest != region->entry_block)
2020 bb_regions[e->dest->index] = old_region;
2021 else
2022 bb_regions[e->dest->index] = region;
2023 }
2024 }
2025 while (!queue.is_empty ());
2026 BITMAP_FREE (visited_blocks);
2027 }
2028
2029 /* The "gate" function for all transactional memory expansion and optimization
2030 passes. We collect region information for each top-level transaction, and
2031 if we don't find any, we skip all of the TM passes. Each region will have
2032 all of the exit blocks recorded, and the originating statement. */
2033
2034 static bool
2035 gate_tm_init (void)
2036 {
2037 if (!flag_tm)
2038 return false;
2039
2040 calculate_dominance_info (CDI_DOMINATORS);
2041 bitmap_obstack_initialize (&tm_obstack);
2042
2043 /* If the function is a TM_CLONE, then the entire function is the region. */
2044 if (decl_is_tm_clone (current_function_decl))
2045 {
2046 struct tm_region *region = (struct tm_region *)
2047 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2048 memset (region, 0, sizeof (*region));
2049 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2050 /* For a clone, the entire function is the region. But even if
2051 we don't need to record any exit blocks, we may need to
2052 record irrevocable blocks. */
2053 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2054
2055 tm_region_init (region);
2056 }
2057 else
2058 {
2059 tm_region_init (NULL);
2060
2061 /* If we didn't find any regions, clean up and skip the whole tree
2062 of tm-related optimizations. */
2063 if (all_tm_regions == NULL)
2064 {
2065 bitmap_obstack_release (&tm_obstack);
2066 return false;
2067 }
2068 }
2069
2070 return true;
2071 }
2072
2073 namespace {
2074
2075 const pass_data pass_data_tm_init =
2076 {
2077 GIMPLE_PASS, /* type */
2078 "*tminit", /* name */
2079 OPTGROUP_NONE, /* optinfo_flags */
2080 TV_TRANS_MEM, /* tv_id */
2081 ( PROP_ssa | PROP_cfg ), /* properties_required */
2082 0, /* properties_provided */
2083 0, /* properties_destroyed */
2084 0, /* todo_flags_start */
2085 0, /* todo_flags_finish */
2086 };
2087
2088 class pass_tm_init : public gimple_opt_pass
2089 {
2090 public:
2091 pass_tm_init (gcc::context *ctxt)
2092 : gimple_opt_pass (pass_data_tm_init, ctxt)
2093 {}
2094
2095 /* opt_pass methods: */
2096 virtual bool gate (function *) { return gate_tm_init (); }
2097
2098 }; // class pass_tm_init
2099
2100 } // anon namespace
2101
2102 gimple_opt_pass *
2103 make_pass_tm_init (gcc::context *ctxt)
2104 {
2105 return new pass_tm_init (ctxt);
2106 }
2107 \f
2108 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2109 represented by REGION. */
2110
2111 static inline void
2112 transaction_subcode_ior (struct tm_region *region, unsigned flags)
2113 {
2114 if (region && region->transaction_stmt)
2115 {
2116 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2117 flags |= gimple_transaction_subcode (transaction_stmt);
2118 gimple_transaction_set_subcode (transaction_stmt, flags);
2119 }
2120 }
2121
2122 /* Construct a memory load in a transactional context. Return the
2123 gimple statement performing the load, or NULL if there is no
2124 TM_LOAD builtin of the appropriate size to do the load.
2125
2126 LOC is the location to use for the new statement(s). */
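/* For example (a sketch only): inside a transaction, a 4-byte load
   "lhs = x" becomes roughly

	lhs = BUILT_IN_TM_LOAD_4 (&x);

   with an extra VIEW_CONVERT_EXPR copy through a temporary when the
   builtin's return type does not match the type of LHS.  */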
2127
2128 static gcall *
2129 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2130 {
2131 enum built_in_function code = END_BUILTINS;
2132 tree t, type = TREE_TYPE (rhs), decl;
2133 gcall *gcall;
2134
2135 if (type == float_type_node)
2136 code = BUILT_IN_TM_LOAD_FLOAT;
2137 else if (type == double_type_node)
2138 code = BUILT_IN_TM_LOAD_DOUBLE;
2139 else if (type == long_double_type_node)
2140 code = BUILT_IN_TM_LOAD_LDOUBLE;
2141 else if (TYPE_SIZE_UNIT (type) != NULL
2142 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2143 {
2144 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2145 {
2146 case 1:
2147 code = BUILT_IN_TM_LOAD_1;
2148 break;
2149 case 2:
2150 code = BUILT_IN_TM_LOAD_2;
2151 break;
2152 case 4:
2153 code = BUILT_IN_TM_LOAD_4;
2154 break;
2155 case 8:
2156 code = BUILT_IN_TM_LOAD_8;
2157 break;
2158 }
2159 }
2160
2161 if (code == END_BUILTINS)
2162 {
2163 decl = targetm.vectorize.builtin_tm_load (type);
2164 if (!decl)
2165 return NULL;
2166 }
2167 else
2168 decl = builtin_decl_explicit (code);
2169
2170 t = gimplify_addr (gsi, rhs);
2171 gcall = gimple_build_call (decl, 1, t);
2172 gimple_set_location (gcall, loc);
2173
2174 t = TREE_TYPE (TREE_TYPE (decl));
2175 if (useless_type_conversion_p (type, t))
2176 {
2177 gimple_call_set_lhs (gcall, lhs);
2178 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2179 }
2180 else
2181 {
2182 gimple g;
2183 tree temp;
2184
2185 temp = create_tmp_reg (t);
2186 gimple_call_set_lhs (gcall, temp);
2187 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2188
2189 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2190 g = gimple_build_assign (lhs, t);
2191 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2192 }
2193
2194 return gcall;
2195 }
2196
2197
2198 /* Similarly, construct a memory store in a transactional context. */
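/* For example (a sketch only): inside a transaction, a 4-byte store
   "x = val" becomes roughly

	BUILT_IN_TM_STORE_4 (&x, val);

   with VAL first copied through a VIEW_CONVERT_EXPR temporary when its
   type does not match the builtin's argument type.  */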
2199
2200 static gcall *
2201 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2202 {
2203 enum built_in_function code = END_BUILTINS;
2204 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2205 gcall *gcall;
2206
2207 if (type == float_type_node)
2208 code = BUILT_IN_TM_STORE_FLOAT;
2209 else if (type == double_type_node)
2210 code = BUILT_IN_TM_STORE_DOUBLE;
2211 else if (type == long_double_type_node)
2212 code = BUILT_IN_TM_STORE_LDOUBLE;
2213 else if (TYPE_SIZE_UNIT (type) != NULL
2214 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2215 {
2216 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2217 {
2218 case 1:
2219 code = BUILT_IN_TM_STORE_1;
2220 break;
2221 case 2:
2222 code = BUILT_IN_TM_STORE_2;
2223 break;
2224 case 4:
2225 code = BUILT_IN_TM_STORE_4;
2226 break;
2227 case 8:
2228 code = BUILT_IN_TM_STORE_8;
2229 break;
2230 }
2231 }
2232
2233 if (code == END_BUILTINS)
2234 {
2235 fn = targetm.vectorize.builtin_tm_store (type);
2236 if (!fn)
2237 return NULL;
2238 }
2239 else
2240 fn = builtin_decl_explicit (code);
2241
2242 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2243
2244 if (TREE_CODE (rhs) == CONSTRUCTOR)
2245 {
2246 /* Handle the easy initialization to zero. */
2247 if (!CONSTRUCTOR_ELTS (rhs))
2248 rhs = build_int_cst (simple_type, 0);
2249 else
2250 {
2251 /* ...otherwise punt to the caller and probably use
2252 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2253 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2254 valid gimple. */
2255 return NULL;
2256 }
2257 }
2258 else if (!useless_type_conversion_p (simple_type, type))
2259 {
2260 gimple g;
2261 tree temp;
2262
2263 temp = create_tmp_reg (simple_type);
2264 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2265 g = gimple_build_assign (temp, t);
2266 gimple_set_location (g, loc);
2267 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2268
2269 rhs = temp;
2270 }
2271
2272 t = gimplify_addr (gsi, lhs);
2273 gcall = gimple_build_call (fn, 2, t, rhs);
2274 gimple_set_location (gcall, loc);
2275 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2276
2277 return gcall;
2278 }
2279
2280
2281 /* Expand an assignment statement into transactional builtins. */
2282
2283 static void
2284 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2285 {
2286 gimple stmt = gsi_stmt (*gsi);
2287 location_t loc = gimple_location (stmt);
2288 tree lhs = gimple_assign_lhs (stmt);
2289 tree rhs = gimple_assign_rhs1 (stmt);
2290 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2291 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2292 gimple gcall = NULL;
2293
2294 if (!load_p && !store_p)
2295 {
2296 /* Add thread private addresses to the log if applicable. */
2297 requires_barrier (region->entry_block, lhs, stmt);
2298 gsi_next (gsi);
2299 return;
2300 }
2301
2302 // Remove original load/store statement.
2303 gsi_remove (gsi, true);
2304
2305 if (load_p && !store_p)
2306 {
2307 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2308 gcall = build_tm_load (loc, lhs, rhs, gsi);
2309 }
2310 else if (store_p && !load_p)
2311 {
2312 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2313 gcall = build_tm_store (loc, lhs, rhs, gsi);
2314 }
2315 if (!gcall)
2316 {
2317 tree lhs_addr, rhs_addr, tmp;
2318
2319 if (load_p)
2320 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2321 if (store_p)
2322 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2323
2324 /* ??? Figure out if there's any possible overlap between the LHS
2325 and the RHS and if not, use MEMCPY. */
2326
2327 if (load_p && is_gimple_reg (lhs))
2328 {
2329 tmp = create_tmp_var (TREE_TYPE (lhs));
2330 lhs_addr = build_fold_addr_expr (tmp);
2331 }
2332 else
2333 {
2334 tmp = NULL_TREE;
2335 lhs_addr = gimplify_addr (gsi, lhs);
2336 }
2337 rhs_addr = gimplify_addr (gsi, rhs);
2338 gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
2339 3, lhs_addr, rhs_addr,
2340 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2341 gimple_set_location (gcall, loc);
2342 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2343
2344 if (tmp)
2345 {
2346 gcall = gimple_build_assign (lhs, tmp);
2347 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2348 }
2349 }
2350
2351 /* Now that we have the load/store in its instrumented form, add
2352 thread private addresses to the log if applicable. */
2353 if (!store_p)
2354 requires_barrier (region->entry_block, lhs, gcall);
2355
2356 // The calls to build_tm_{store,load} above inserted the instrumented
2357 // call into the stream.
2358 // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2359 }
2360
2361
2362 /* Expand a call statement as appropriate for a transaction. That is,
2363 either verify that the call does not affect the transaction, or
2364 redirect the call to a clone that handles transactions, or change
2365 the transaction state to IRREVOCABLE. Return true if the call is
2366 one of the builtins that end a transaction. */
2367
2368 static bool
2369 expand_call_tm (struct tm_region *region,
2370 gimple_stmt_iterator *gsi)
2371 {
2372 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
2373 tree lhs = gimple_call_lhs (stmt);
2374 tree fn_decl;
2375 struct cgraph_node *node;
2376 bool retval = false;
2377
2378 fn_decl = gimple_call_fndecl (stmt);
2379
2380 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2381 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2382 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2383 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2384 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2385
2386 if (is_tm_pure_call (stmt))
2387 return false;
2388
2389 if (fn_decl)
2390 retval = is_tm_ending_fndecl (fn_decl);
2391 if (!retval)
2392 {
2393 /* Assume all non-const/pure calls write to memory, except
2394 transaction ending builtins. */
2395 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2396 }
2397
2398 /* For indirect calls, we already generated a call into the runtime. */
2399 if (!fn_decl)
2400 {
2401 tree fn = gimple_call_fn (stmt);
2402
2403 /* We are guaranteed never to go irrevocable on a safe or pure
2404 call, and the pure call was handled above. */
2405 if (is_tm_safe (fn))
2406 return false;
2407 else
2408 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2409
2410 return false;
2411 }
2412
2413 node = cgraph_node::get (fn_decl);
2414 /* All calls should have a cgraph node here. */
2415 if (!node)
2416 {
2417 /* We can have a nodeless call here if some pass after IPA-tm
2418 added uninstrumented calls. For example, loop distribution
2419 can transform certain loop constructs into __builtin_mem*
2420 calls. In this case, see if we have a suitable TM
2421 replacement and fill in the gaps. */
2422 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2423 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2424 gcc_assert (code == BUILT_IN_MEMCPY
2425 || code == BUILT_IN_MEMMOVE
2426 || code == BUILT_IN_MEMSET);
2427
2428 tree repl = find_tm_replacement_function (fn_decl);
2429 if (repl)
2430 {
2431 gimple_call_set_fndecl (stmt, repl);
2432 update_stmt (stmt);
2433 node = cgraph_node::create (repl);
2434 node->local.tm_may_enter_irr = false;
2435 return expand_call_tm (region, gsi);
2436 }
2437 gcc_unreachable ();
2438 }
2439 if (node->local.tm_may_enter_irr)
2440 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2441
2442 if (is_tm_abort (fn_decl))
2443 {
2444 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2445 return true;
2446 }
2447
2448 /* Instrument the store if needed.
2449
2450 If the assignment happens inside the function call (return slot
2451 optimization), there is no instrumentation to be done, since
2452 the callee should have done the right thing. */
2453 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2454 && !gimple_call_return_slot_opt_p (stmt))
2455 {
2456 tree tmp = create_tmp_reg (TREE_TYPE (lhs));
2457 location_t loc = gimple_location (stmt);
2458 edge fallthru_edge = NULL;
2459 gassign *assign_stmt;
2460
2461 /* Remember if the call was going to throw. */
2462 if (stmt_can_throw_internal (stmt))
2463 {
2464 edge_iterator ei;
2465 edge e;
2466 basic_block bb = gimple_bb (stmt);
2467
2468 FOR_EACH_EDGE (e, ei, bb->succs)
2469 if (e->flags & EDGE_FALLTHRU)
2470 {
2471 fallthru_edge = e;
2472 break;
2473 }
2474 }
2475
2476 gimple_call_set_lhs (stmt, tmp);
2477 update_stmt (stmt);
2478 assign_stmt = gimple_build_assign (lhs, tmp);
2479 gimple_set_location (assign_stmt, loc);
2480
2481 /* We cannot throw in the middle of a BB. If the call was going
2482 to throw, place the instrumentation on the fallthru edge, so
2483 the call remains the last statement in the block. */
2484 if (fallthru_edge)
2485 {
2486 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
2487 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2488 expand_assign_tm (region, &fallthru_gsi);
2489 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2490 pending_edge_inserts_p = true;
2491 }
2492 else
2493 {
2494 gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
2495 expand_assign_tm (region, gsi);
2496 }
2497
2498 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2499 }
2500
2501 return retval;
2502 }
2503
2504
2505 /* Expand all statements in BB as appropriate for being inside
2506 a transaction. */
2507
2508 static void
2509 expand_block_tm (struct tm_region *region, basic_block bb)
2510 {
2511 gimple_stmt_iterator gsi;
2512
2513 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2514 {
2515 gimple stmt = gsi_stmt (gsi);
2516 switch (gimple_code (stmt))
2517 {
2518 case GIMPLE_ASSIGN:
2519 /* Only memory reads/writes need to be instrumented. */
2520 if (gimple_assign_single_p (stmt)
2521 && !gimple_clobber_p (stmt))
2522 {
2523 expand_assign_tm (region, &gsi);
2524 continue;
2525 }
2526 break;
2527
2528 case GIMPLE_CALL:
2529 if (expand_call_tm (region, &gsi))
2530 return;
2531 break;
2532
2533 case GIMPLE_ASM:
2534 gcc_unreachable ();
2535
2536 default:
2537 break;
2538 }
2539 if (!gsi_end_p (gsi))
2540 gsi_next (&gsi);
2541 }
2542 }
2543
2544 /* Return the list of basic-blocks in REGION.
2545
2546 STOP_AT_IRREVOCABLE_P is true if the caller is not interested in blocks
2547 following a TM_IRREVOCABLE call.
2548
2549 INCLUDE_UNINSTRUMENTED_P is true if we should include the
2550 uninstrumented code path blocks in the list of basic blocks
2551 returned, false otherwise. */
2552
2553 static vec<basic_block>
2554 get_tm_region_blocks (basic_block entry_block,
2555 bitmap exit_blocks,
2556 bitmap irr_blocks,
2557 bitmap all_region_blocks,
2558 bool stop_at_irrevocable_p,
2559 bool include_uninstrumented_p = true)
2560 {
2561 vec<basic_block> bbs = vNULL;
2562 unsigned i;
2563 edge e;
2564 edge_iterator ei;
2565 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2566
2567 i = 0;
2568 bbs.safe_push (entry_block);
2569 bitmap_set_bit (visited_blocks, entry_block->index);
2570
2571 do
2572 {
2573 basic_block bb = bbs[i++];
2574
2575 if (exit_blocks
2576 && bitmap_bit_p (exit_blocks, bb->index))
2577 continue;
2578
2579 if (stop_at_irrevocable_p
2580 && irr_blocks
2581 && bitmap_bit_p (irr_blocks, bb->index))
2582 continue;
2583
2584 FOR_EACH_EDGE (e, ei, bb->succs)
2585 if ((include_uninstrumented_p
2586 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2587 && !bitmap_bit_p (visited_blocks, e->dest->index))
2588 {
2589 bitmap_set_bit (visited_blocks, e->dest->index);
2590 bbs.safe_push (e->dest);
2591 }
2592 }
2593 while (i < bbs.length ());
2594
2595 if (all_region_blocks)
2596 bitmap_ior_into (all_region_blocks, visited_blocks);
2597
2598 BITMAP_FREE (visited_blocks);
2599 return bbs;
2600 }
2601
2602 // Callback data for collect_bb2reg.
2603 struct bb2reg_stuff
2604 {
2605 vec<tm_region_p> *bb2reg;
2606 bool include_uninstrumented_p;
2607 };
2608
2609 // Callback for expand_regions, collect innermost region data for each bb.
2610 static void *
2611 collect_bb2reg (struct tm_region *region, void *data)
2612 {
2613 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2614 vec<tm_region_p> *bb2reg = stuff->bb2reg;
2615 vec<basic_block> queue;
2616 unsigned int i;
2617 basic_block bb;
2618
2619 queue = get_tm_region_blocks (region->entry_block,
2620 region->exit_blocks,
2621 region->irr_blocks,
2622 NULL,
2623 /*stop_at_irr_p=*/true,
2624 stuff->include_uninstrumented_p);
2625
2626 // We expect expand_regions to perform a post-order traversal of the region
2627 // tree. Therefore the last region seen for any bb is the innermost.
2628 FOR_EACH_VEC_ELT (queue, i, bb)
2629 (*bb2reg)[bb->index] = region;
2630
2631 queue.release ();
2632 return NULL;
2633 }
2634
2635 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2636 // which a basic block belongs. Note that we only consider the instrumented
2637 // code paths for the region; the uninstrumented code paths are ignored if
2638 // INCLUDE_UNINSTRUMENTED_P is false.
2639 //
2640 // ??? This data is very similar to the bb_regions array that is collected
2641 // during tm_region_init. Or, rather, this data is similar to what could
2642 // be used within tm_region_init. The actual computation in tm_region_init
2643 // begins and ends with bb_regions entirely full of NULL pointers, due to
2644 // the way in which pointers are swapped in and out of the array.
2645 //
2646 // ??? Our callers expect that blocks are not shared between transactions.
2647 // When the optimizers get too smart, and blocks are shared, then during
2648 // the tm_mark phase we'll add log entries to only one of the two transactions,
2649 // and in the tm_edge phase we'll add edges to the CFG that create invalid
2650 // cycles. The symptom is SSA defs that do not dominate their uses.
2651 // Note that the optimizers were locally correct with their transformation,
2652 // as we have no info within the program that suggests that the blocks cannot
2653 // be shared.
2654 //
2655 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2656 // only known instance of this block sharing.
2657
2658 static vec<tm_region_p>
2659 get_bb_regions_instrumented (bool traverse_clones,
2660 bool include_uninstrumented_p)
2661 {
2662 unsigned n = last_basic_block_for_fn (cfun);
2663 struct bb2reg_stuff stuff;
2664 vec<tm_region_p> ret;
2665
2666 ret.create (n);
2667 ret.safe_grow_cleared (n);
2668 stuff.bb2reg = &ret;
2669 stuff.include_uninstrumented_p = include_uninstrumented_p;
2670 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2671
2672 return ret;
2673 }
2674
2675 /* Set the BB_IN_TRANSACTION flag on all basic blocks that are part of a
2676 transaction. */
2677
2678 void
2679 compute_transaction_bits (void)
2680 {
2681 struct tm_region *region;
2682 vec<basic_block> queue;
2683 unsigned int i;
2684 basic_block bb;
2685
2686 /* ??? Perhaps we need to abstract gate_tm_init further, because we
2687 certainly don't need it to calculate CDI_DOMINATORS info. */
2688 gate_tm_init ();
2689
2690 FOR_EACH_BB_FN (bb, cfun)
2691 bb->flags &= ~BB_IN_TRANSACTION;
2692
2693 for (region = all_tm_regions; region; region = region->next)
2694 {
2695 queue = get_tm_region_blocks (region->entry_block,
2696 region->exit_blocks,
2697 region->irr_blocks,
2698 NULL,
2699 /*stop_at_irr_p=*/true);
2700 for (i = 0; queue.iterate (i, &bb); ++i)
2701 bb->flags |= BB_IN_TRANSACTION;
2702 queue.release ();
2703 }
2704
2705 if (all_tm_regions)
2706 bitmap_obstack_release (&tm_obstack);
2707 }
2708
2709 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2710 call to BUILT_IN_TM_START. */
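/* A sketch (labels purely illustrative) of the control flow this builds
   for a transaction that logs live variables, has an abort edge, and has
   both instrumented and uninstrumented code paths:

	tm_state = _ITM_beginTransaction (PR_* flags);
	if (tm_state & A_RESTORELIVEVARIABLES)
	  <restore the logged values>;
	if (tm_state & A_ABORTTRANSACTION)
	  goto <over label>;
	if (tm_state & A_RUNUNINSTRUMENTEDCODE)
	  goto <uninstrumented code path>;
	else
	  goto <instrumented code path>;  */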
2711
2712 static void *
2713 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2714 {
2715 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2716 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2717 tree tm_state = region->tm_state;
2718 tree tm_state_type = TREE_TYPE (tm_state);
2719 edge abort_edge = NULL;
2720 edge inst_edge = NULL;
2721 edge uninst_edge = NULL;
2722 edge fallthru_edge = NULL;
2723
2724 // Identify the various successors of the transaction start.
2725 {
2726 edge_iterator i;
2727 edge e;
2728 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2729 {
2730 if (e->flags & EDGE_TM_ABORT)
2731 abort_edge = e;
2732 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2733 uninst_edge = e;
2734 else
2735 inst_edge = e;
2736 if (e->flags & EDGE_FALLTHRU)
2737 fallthru_edge = e;
2738 }
2739 }
2740
2741 /* ??? There are plenty of bits here we're not computing. */
2742 {
2743 int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
2744 int flags = 0;
2745 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2746 flags |= PR_DOESGOIRREVOCABLE;
2747 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2748 flags |= PR_HASNOIRREVOCABLE;
2749 /* If the transaction does not have an abort in lexical scope and is not
2750 marked as an outer transaction, then it will never abort. */
2751 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2752 flags |= PR_HASNOABORT;
2753 if ((subcode & GTMA_HAVE_STORE) == 0)
2754 flags |= PR_READONLY;
2755 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2756 flags |= PR_INSTRUMENTEDCODE;
2757 if (uninst_edge)
2758 flags |= PR_UNINSTRUMENTEDCODE;
2759 if (subcode & GTMA_IS_OUTER)
2760 region->original_transaction_was_outer = true;
2761 tree t = build_int_cst (tm_state_type, flags);
2762 gcall *call = gimple_build_call (tm_start, 1, t);
2763 gimple_call_set_lhs (call, tm_state);
2764 gimple_set_location (call, gimple_location (region->transaction_stmt));
2765
2766 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2767 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2768 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2769 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2770 gsi_remove (&gsi, true);
2771 region->transaction_stmt = call;
2772 }
2773
2774 // Generate log saves.
2775 if (!tm_log_save_addresses.is_empty ())
2776 tm_log_emit_saves (region->entry_block, transaction_bb);
2777
2778 // In the beginning, we have no tests to perform on transaction restart.
2779 // Note that after this point, transaction_bb becomes the "most recent
2780 // block containing tests for the transaction".
2781 region->restart_block = region->entry_block;
2782
2783 // Generate log restores.
2784 if (!tm_log_save_addresses.is_empty ())
2785 {
2786 basic_block test_bb = create_empty_bb (transaction_bb);
2787 basic_block code_bb = create_empty_bb (test_bb);
2788 basic_block join_bb = create_empty_bb (code_bb);
2789 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2790 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2791 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2792 if (region->restart_block == region->entry_block)
2793 region->restart_block = test_bb;
2794
2795 tree t1 = create_tmp_reg (tm_state_type);
2796 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2797 gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2798 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2799 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2800
2801 t2 = build_int_cst (tm_state_type, 0);
2802 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2803 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2804
2805 tm_log_emit_restores (region->entry_block, code_bb);
2806
2807 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2808 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2809 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2810 redirect_edge_pred (fallthru_edge, join_bb);
2811
2812 join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
2813 join_bb->count = test_bb->count = transaction_bb->count;
2814
2815 ei->probability = PROB_ALWAYS;
2816 et->probability = PROB_LIKELY;
2817 ef->probability = PROB_UNLIKELY;
2818 et->count = apply_probability (test_bb->count, et->probability);
2819 ef->count = apply_probability (test_bb->count, ef->probability);
2820
2821 code_bb->count = et->count;
2822 code_bb->frequency = EDGE_FREQUENCY (et);
2823
2824 transaction_bb = join_bb;
2825 }
2826
2827 // If we have an ABORT edge, create a test to perform the abort.
2828 if (abort_edge)
2829 {
2830 basic_block test_bb = create_empty_bb (transaction_bb);
2831 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2832 if (region->restart_block == region->entry_block)
2833 region->restart_block = test_bb;
2834
2835 tree t1 = create_tmp_reg (tm_state_type);
2836 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2837 gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2838 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2839 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2840
2841 t2 = build_int_cst (tm_state_type, 0);
2842 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2843 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2844
2845 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2846 test_bb->frequency = transaction_bb->frequency;
2847 test_bb->count = transaction_bb->count;
2848 ei->probability = PROB_ALWAYS;
2849
2850 // Not-abort edge. If both are live, choose one at random as we'll
2851 // be fixing that up below.
2852 redirect_edge_pred (fallthru_edge, test_bb);
2853 fallthru_edge->flags = EDGE_FALSE_VALUE;
2854 fallthru_edge->probability = PROB_VERY_LIKELY;
2855 fallthru_edge->count
2856 = apply_probability (test_bb->count, fallthru_edge->probability);
2857
2858 // Abort/over edge.
2859 redirect_edge_pred (abort_edge, test_bb);
2860 abort_edge->flags = EDGE_TRUE_VALUE;
2861 abort_edge->probability = PROB_VERY_UNLIKELY;
2862 abort_edge->count
2863 = apply_probability (test_bb->count, abort_edge->probability);
2864
2865 transaction_bb = test_bb;
2866 }
2867
2868 // If we have both instrumented and uninstrumented code paths, select one.
2869 if (inst_edge && uninst_edge)
2870 {
2871 basic_block test_bb = create_empty_bb (transaction_bb);
2872 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2873 if (region->restart_block == region->entry_block)
2874 region->restart_block = test_bb;
2875
2876 tree t1 = create_tmp_reg (tm_state_type);
2877 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2878
2879 gimple stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2880 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2881 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2882
2883 t2 = build_int_cst (tm_state_type, 0);
2884 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2885 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2886
2887 // Create the edge into test_bb first, as we want to copy values
2888 // out of the fallthru edge.
2889 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
2890 e->probability = fallthru_edge->probability;
2891 test_bb->count = e->count = fallthru_edge->count;
2892 test_bb->frequency = EDGE_FREQUENCY (e);
2893
2894 // Now update the edges to the inst/uninst implementations.
2895 // For now assume that the paths are equally likely. When using HTM,
2896 // we'll try the uninst path first and fall back to the inst path if HTM
2897 // buffers are exceeded. Without HTM we start with the inst path and
2898 // use the uninst path when falling back to serial mode.
2899 redirect_edge_pred (inst_edge, test_bb);
2900 inst_edge->flags = EDGE_FALSE_VALUE;
2901 inst_edge->probability = REG_BR_PROB_BASE / 2;
2902 inst_edge->count
2903 = apply_probability (test_bb->count, inst_edge->probability);
2904
2905 redirect_edge_pred (uninst_edge, test_bb);
2906 uninst_edge->flags = EDGE_TRUE_VALUE;
2907 uninst_edge->probability = REG_BR_PROB_BASE / 2;
2908 uninst_edge->count
2909 = apply_probability (test_bb->count, uninst_edge->probability);
2910 }
2911
2912 // If we have no previous special cases, and we have PHIs at the beginning
2913 // of the atomic region, this means we have a loop at the beginning of the
2914 // atomic region that shares the first block. This can cause problems with
2915 // the abnormal transaction-restart edges that will be added in the tm_edges pass.
2916 // Solve this by adding a new empty block to receive the abnormal edges.
2917 if (region->restart_block == region->entry_block
2918 && phi_nodes (region->entry_block))
2919 {
2920 basic_block empty_bb = create_empty_bb (transaction_bb);
2921 region->restart_block = empty_bb;
2922 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
2923
2924 redirect_edge_pred (fallthru_edge, empty_bb);
2925 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
2926 }
2927
2928 return NULL;
2929 }
2930
2931 /* Generate the temporary to be used for the return value of
2932 BUILT_IN_TM_START. */
2933
2934 static void *
2935 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2936 {
2937 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2938 region->tm_state =
2939 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
2940
2941 // Reset the subcode, post optimizations. We'll fill this in
2942 // again as we process blocks.
2943 if (region->exit_blocks)
2944 {
2945 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2946 unsigned int subcode = gimple_transaction_subcode (transaction_stmt);
2947
2948 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2949 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
2950 | GTMA_MAY_ENTER_IRREVOCABLE
2951 | GTMA_HAS_NO_INSTRUMENTATION);
2952 else
2953 subcode &= GTMA_DECLARATION_MASK;
2954 gimple_transaction_set_subcode (transaction_stmt, subcode);
2955 }
2956
2957 return NULL;
2958 }
2959
2960 // Propagate flags from inner transactions outwards.
2961 static void
2962 propagate_tm_flags_out (struct tm_region *region)
2963 {
2964 if (region == NULL)
2965 return;
2966 propagate_tm_flags_out (region->inner);
2967
2968 if (region->outer && region->outer->transaction_stmt)
2969 {
2970 unsigned s
2971 = gimple_transaction_subcode (region->get_transaction_stmt ());
2972 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
2973 | GTMA_MAY_ENTER_IRREVOCABLE);
2974 s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
2975 gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
2976 s);
2977 }
2978
2979 propagate_tm_flags_out (region->next);
2980 }
2981
2982 /* Entry point to the MARK phase of TM expansion. Here we replace
2983 transactional memory statements with calls to builtins, and function
2984 calls with their transactional clones (if available). But we don't
2985 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
2986
2987 static unsigned int
2988 execute_tm_mark (void)
2989 {
2990 pending_edge_inserts_p = false;
2991
2992 expand_regions (all_tm_regions, generate_tm_state, NULL,
2993 /*traverse_clones=*/true);
2994
2995 tm_log_init ();
2996
2997 vec<tm_region_p> bb_regions
2998 = get_bb_regions_instrumented (/*traverse_clones=*/true,
2999 /*include_uninstrumented_p=*/false);
3000 struct tm_region *r;
3001 unsigned i;
3002
3003 // Expand memory operations into calls into the runtime.
3004 // This collects log entries as well.
3005 FOR_EACH_VEC_ELT (bb_regions, i, r)
3006 {
3007 if (r != NULL)
3008 {
3009 if (r->transaction_stmt)
3010 {
3011 unsigned sub
3012 = gimple_transaction_subcode (r->get_transaction_stmt ());
3013
3014 /* If we're sure to go irrevocable, there won't be
3015 anything to expand, since the run-time will go
3016 irrevocable right away. */
3017 if (sub & GTMA_DOES_GO_IRREVOCABLE
3018 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
3019 continue;
3020 }
3021 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
3022 }
3023 }
3024
3025 bb_regions.release ();
3026
3027 // Propagate flags from inner transactions outwards.
3028 propagate_tm_flags_out (all_tm_regions);
3029
3030 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
3031 expand_regions (all_tm_regions, expand_transaction, NULL,
3032 /*traverse_clones=*/false);
3033
3034 tm_log_emit ();
3035 tm_log_delete ();
3036
3037 if (pending_edge_inserts_p)
3038 gsi_commit_edge_inserts ();
3039 free_dominance_info (CDI_DOMINATORS);
3040 return 0;
3041 }
3042
3043 namespace {
3044
3045 const pass_data pass_data_tm_mark =
3046 {
3047 GIMPLE_PASS, /* type */
3048 "tmmark", /* name */
3049 OPTGROUP_NONE, /* optinfo_flags */
3050 TV_TRANS_MEM, /* tv_id */
3051 ( PROP_ssa | PROP_cfg ), /* properties_required */
3052 0, /* properties_provided */
3053 0, /* properties_destroyed */
3054 0, /* todo_flags_start */
3055 TODO_update_ssa, /* todo_flags_finish */
3056 };
3057
3058 class pass_tm_mark : public gimple_opt_pass
3059 {
3060 public:
3061 pass_tm_mark (gcc::context *ctxt)
3062 : gimple_opt_pass (pass_data_tm_mark, ctxt)
3063 {}
3064
3065 /* opt_pass methods: */
3066 virtual unsigned int execute (function *) { return execute_tm_mark (); }
3067
3068 }; // class pass_tm_mark
3069
3070 } // anon namespace
3071
3072 gimple_opt_pass *
3073 make_pass_tm_mark (gcc::context *ctxt)
3074 {
3075 return new pass_tm_mark (ctxt);
3076 }
3077 \f
3078
3079 /* Create an abnormal edge from STMT at ITER to DEST_BB, splitting the block
3080 as necessary. Adjust *PNEXT as needed for the split block. */
3081
3082 static inline void
3083 split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
3084 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
3085 {
3086 basic_block bb = gimple_bb (stmt);
3087 if (!gsi_one_before_end_p (iter))
3088 {
3089 edge e = split_block (bb, stmt);
3090 *pnext = gsi_start_bb (e->dest);
3091 }
3092 make_edge (bb, dest_bb, EDGE_ABNORMAL);
3093
3094 // Record the need for the edge for the benefit of the rtl passes.
3095 if (cfun->gimple_df->tm_restart == NULL)
3096 cfun->gimple_df->tm_restart
3097 = hash_table<tm_restart_hasher>::create_ggc (31);
3098
3099 struct tm_restart_node dummy;
3100 dummy.stmt = stmt;
3101 dummy.label_or_list = gimple_block_label (dest_bb);
3102
3103 tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
3104 INSERT);
3105 struct tm_restart_node *n = *slot;
3106 if (n == NULL)
3107 {
3108 n = ggc_alloc<tm_restart_node> ();
3109 *n = dummy;
3110 }
3111 else
3112 {
3113 tree old = n->label_or_list;
3114 if (TREE_CODE (old) == LABEL_DECL)
3115 old = tree_cons (NULL, old, NULL);
3116 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3117 }
3118 }
3119
3120 /* Split block BB as necessary for every builtin function we added, and
3121 wire up the abnormal back edges implied by the transaction restart. */
3122
3123 static void
3124 expand_block_edges (struct tm_region *const region, basic_block bb)
3125 {
3126 gimple_stmt_iterator gsi, next_gsi;
3127
3128 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3129 {
3130 gimple stmt = gsi_stmt (gsi);
3131 gcall *call_stmt;
3132
3133 next_gsi = gsi;
3134 gsi_next (&next_gsi);
3135
3136 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3137 call_stmt = dyn_cast <gcall *> (stmt);
3138 if ((!call_stmt)
3139 || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
3140 continue;
3141
3142 if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
3143 == BUILT_IN_TM_ABORT)
3144 {
3145 // If we have a ``__transaction_cancel [[outer]]'', there is only
3146 // one abnormal edge: to the transaction marked OUTER.
3147 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3148 // constant argument, which we can examine here. Users invoking
3149 // TM_ABORT directly get what they deserve.
3150 tree arg = gimple_call_arg (call_stmt, 0);
3151 if (TREE_CODE (arg) == INTEGER_CST
3152 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3153 && !decl_is_tm_clone (current_function_decl))
3154 {
3155 // Find the GTMA_IS_OUTER transaction.
3156 for (struct tm_region *o = region; o; o = o->outer)
3157 if (o->original_transaction_was_outer)
3158 {
3159 split_bb_make_tm_edge (call_stmt, o->restart_block,
3160 gsi, &next_gsi);
3161 break;
3162 }
3163
3164 // Otherwise, the front-end should have semantically checked
3165 // outer aborts, but in either case the target region is not
3166 // within this function.
3167 continue;
3168 }
3169
3170 // Non-outer TM aborts have an abnormal edge to the innermost
3171 // transaction, the one being aborted.
3172 split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
3173 &next_gsi);
3174 }
3175
3176 // All TM builtins have an abnormal edge to the outermost transaction.
3177 // We never restart inner transactions. For tm clones, we know a priori
3178 // that the outermost transaction is outside the function.
3179 if (decl_is_tm_clone (current_function_decl))
3180 continue;
3181
3182 if (cfun->gimple_df->tm_restart == NULL)
3183 cfun->gimple_df->tm_restart
3184 = hash_table<tm_restart_hasher>::create_ggc (31);
3185
3186 // All TM builtins have an abnormal edge to the outermost transaction.
3187 // We never restart inner transactions.
3188 for (struct tm_region *o = region; o; o = o->outer)
3189 if (!o->outer)
3190 {
3191 split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
3192 break;
3193 }
3194
3195 // Delete any tail-call annotation that may have been added.
3196 // The tail-call pass may have mis-identified the commit as being
3197 // a candidate because we had not yet added this restart edge.
3198 gimple_call_set_tail (call_stmt, false);
3199 }
3200 }
3201
3202 /* Entry point to the final expansion of transactional nodes. */
3203
3204 namespace {
3205
3206 const pass_data pass_data_tm_edges =
3207 {
3208 GIMPLE_PASS, /* type */
3209 "tmedge", /* name */
3210 OPTGROUP_NONE, /* optinfo_flags */
3211 TV_TRANS_MEM, /* tv_id */
3212 ( PROP_ssa | PROP_cfg ), /* properties_required */
3213 0, /* properties_provided */
3214 0, /* properties_destroyed */
3215 0, /* todo_flags_start */
3216 TODO_update_ssa, /* todo_flags_finish */
3217 };
3218
3219 class pass_tm_edges : public gimple_opt_pass
3220 {
3221 public:
3222 pass_tm_edges (gcc::context *ctxt)
3223 : gimple_opt_pass (pass_data_tm_edges, ctxt)
3224 {}
3225
3226 /* opt_pass methods: */
3227 virtual unsigned int execute (function *);
3228
3229 }; // class pass_tm_edges
3230
3231 unsigned int
3232 pass_tm_edges::execute (function *fun)
3233 {
3234 vec<tm_region_p> bb_regions
3235 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3236 /*include_uninstrumented_p=*/true);
3237 struct tm_region *r;
3238 unsigned i;
3239
3240 FOR_EACH_VEC_ELT (bb_regions, i, r)
3241 if (r != NULL)
3242 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));
3243
3244 bb_regions.release ();
3245
3246 /* We've got to release the dominance info now, to indicate that it
3247 must be rebuilt completely. Otherwise we'll crash trying to update
3248 the SSA web in the TODO section following this pass. */
3249 free_dominance_info (CDI_DOMINATORS);
3250 bitmap_obstack_release (&tm_obstack);
3251 all_tm_regions = NULL;
3252
3253 return 0;
3254 }
3255
3256 } // anon namespace
3257
3258 gimple_opt_pass *
3259 make_pass_tm_edges (gcc::context *ctxt)
3260 {
3261 return new pass_tm_edges (ctxt);
3262 }
3263 \f
3264 /* Helper function for expand_regions. Expand REGION and recurse to
3265 the inner region. Call CALLBACK on each region. CALLBACK returns
3266 NULL to continue the traversal, otherwise a non-null value which
3267 this function will return as well. TRAVERSE_CLONES is true if we
3268 should traverse transactional clones. */
3269
3270 static void *
3271 expand_regions_1 (struct tm_region *region,
3272 void *(*callback)(struct tm_region *, void *),
3273 void *data,
3274 bool traverse_clones)
3275 {
3276 void *retval = NULL;
3277 if (region->exit_blocks
3278 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3279 {
3280 retval = callback (region, data);
3281 if (retval)
3282 return retval;
3283 }
3284 if (region->inner)
3285 {
3286 retval = expand_regions (region->inner, callback, data, traverse_clones);
3287 if (retval)
3288 return retval;
3289 }
3290 return retval;
3291 }
3292
3293 /* Traverse the regions enclosed and including REGION. Execute
3294 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3295 continue the traversal, otherwise a non-null value which this
3296 function will return as well. TRAVERSE_CLONES is true if we should
3297 traverse transactional clones. */
3298
3299 static void *
3300 expand_regions (struct tm_region *region,
3301 void *(*callback)(struct tm_region *, void *),
3302 void *data,
3303 bool traverse_clones)
3304 {
3305 void *retval = NULL;
3306 while (region)
3307 {
3308 retval = expand_regions_1 (region, callback, data, traverse_clones);
3309 if (retval)
3310 return retval;
3311 region = region->next;
3312 }
3313 return retval;
3314 }
3315
3316 \f
3317 /* A unique TM memory operation. */
3318 typedef struct tm_memop
3319 {
3320 /* Unique ID that all memory operations to the same location have. */
3321 unsigned int value_id;
3322 /* Address of load/store. */
3323 tree addr;
3324 } *tm_memop_t;
3325
3326 /* TM memory operation hashtable helpers. */
3327
3328 struct tm_memop_hasher : free_ptr_hash <tm_memop>
3329 {
3330 static inline hashval_t hash (const tm_memop *);
3331 static inline bool equal (const tm_memop *, const tm_memop *);
3332 };
3333
3334 /* Htab support. Return a hash value for a `tm_memop'. */
3335 inline hashval_t
3336 tm_memop_hasher::hash (const tm_memop *mem)
3337 {
3338 tree addr = mem->addr;
3339 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3340 actually done with operand_equal_p (see tm_memop_hasher::equal). */
3341 if (TREE_CODE (addr) == ADDR_EXPR)
3342 addr = TREE_OPERAND (addr, 0);
3343 return iterative_hash_expr (addr, 0);
3344 }
3345
3346 /* Htab support. Return true if two tm_memop's are the same. */
3347 inline bool
3348 tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
3349 {
3350 return operand_equal_p (mem1->addr, mem2->addr, 0);
3351 }
3352
3353 /* Sets for solving data flow equations in the memory optimization pass. */
3354 struct tm_memopt_bitmaps
3355 {
3356 /* Stores available to this BB upon entry. Basically, stores that
3357 dominate this BB. */
3358 bitmap store_avail_in;
3359 /* Stores available at the end of this BB. */
3360 bitmap store_avail_out;
3361 bitmap store_antic_in;
3362 bitmap store_antic_out;
3363 /* Reads available to this BB upon entry. Basically, reads that
3364 dominate this BB. */
3365 bitmap read_avail_in;
3366 /* Reads available at the end of this BB. */
3367 bitmap read_avail_out;
3368 /* Reads performed in this BB. */
3369 bitmap read_local;
3370 /* Writes performed in this BB. */
3371 bitmap store_local;
3372
3373 /* Temporary storage for pass. */
3374 /* Is the current BB in the worklist? */
3375 bool avail_in_worklist_p;
3376 /* Have we visited this BB? */
3377 bool visited_p;
3378 };
3379
3380 static bitmap_obstack tm_memopt_obstack;
3381
3382 /* Unique counter for TM loads and stores. Loads and stores of the
3383 same address get the same ID. */
3384 static unsigned int tm_memopt_value_id;
3385 static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
3386
3387 #define STORE_AVAIL_IN(BB) \
3388 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3389 #define STORE_AVAIL_OUT(BB) \
3390 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3391 #define STORE_ANTIC_IN(BB) \
3392 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3393 #define STORE_ANTIC_OUT(BB) \
3394 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3395 #define READ_AVAIL_IN(BB) \
3396 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3397 #define READ_AVAIL_OUT(BB) \
3398 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3399 #define READ_LOCAL(BB) \
3400 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3401 #define STORE_LOCAL(BB) \
3402 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3403 #define AVAIL_IN_WORKLIST_P(BB) \
3404 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3405 #define BB_VISITED_P(BB) \
3406 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3407
3408 /* Given a TM load/store in STMT, return the value number for the address
3409 it accesses. */
3410
3411 static unsigned int
3412 tm_memopt_value_number (gimple stmt, enum insert_option op)
3413 {
3414 struct tm_memop tmpmem, *mem;
3415 tm_memop **slot;
3416
3417 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3418 tmpmem.addr = gimple_call_arg (stmt, 0);
3419 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
3420 if (*slot)
3421 mem = *slot;
3422 else if (op == INSERT)
3423 {
3424 mem = XNEW (struct tm_memop);
3425 *slot = mem;
3426 mem->value_id = tm_memopt_value_id++;
3427 mem->addr = tmpmem.addr;
3428 }
3429 else
3430 gcc_unreachable ();
3431 return mem->value_id;
3432 }
3433
3434 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3435
3436 static void
3437 tm_memopt_accumulate_memops (basic_block bb)
3438 {
3439 gimple_stmt_iterator gsi;
3440
3441 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3442 {
3443 gimple stmt = gsi_stmt (gsi);
3444 bitmap bits;
3445 unsigned int loc;
3446
3447 if (is_tm_store (stmt))
3448 bits = STORE_LOCAL (bb);
3449 else if (is_tm_load (stmt))
3450 bits = READ_LOCAL (bb);
3451 else
3452 continue;
3453
3454 loc = tm_memopt_value_number (stmt, INSERT);
3455 bitmap_set_bit (bits, loc);
3456 if (dump_file)
3457 {
3458 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3459 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3460 gimple_bb (stmt)->index);
3461 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
3462 fprintf (dump_file, "\n");
3463 }
3464 }
3465 }
3466
3467 /* Prettily dump one of the memopt sets. SET_NAME names the set and BITS is the bitmap to dump. */
3468
3469 static void
3470 dump_tm_memopt_set (const char *set_name, bitmap bits)
3471 {
3472 unsigned i;
3473 bitmap_iterator bi;
3474 const char *comma = "";
3475
3476 fprintf (dump_file, "TM memopt: %s: [", set_name);
3477 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3478 {
3479 hash_table<tm_memop_hasher>::iterator hi;
3480 struct tm_memop *mem = NULL;
3481
3482 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3483 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
3484 if (mem->value_id == i)
3485 break;
3486 gcc_assert (mem->value_id == i);
3487 fprintf (dump_file, "%s", comma);
3488 comma = ", ";
3489 print_generic_expr (dump_file, mem->addr, 0);
3490 }
3491 fprintf (dump_file, "]\n");
3492 }
3493
3494 /* Prettily dump all of the memopt sets in BLOCKS. */
3495
3496 static void
3497 dump_tm_memopt_sets (vec<basic_block> blocks)
3498 {
3499 size_t i;
3500 basic_block bb;
3501
3502 for (i = 0; blocks.iterate (i, &bb); ++i)
3503 {
3504 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3505 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3506 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3507 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3508 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3509 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3510 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3511 }
3512 }
3513
3514 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3515
3516 static void
3517 tm_memopt_compute_avin (basic_block bb)
3518 {
3519 edge e;
3520 unsigned ix;
3521
3522 /* Seed with the AVOUT of any predecessor. */
3523 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3524 {
3525 e = EDGE_PRED (bb, ix);
3526 /* Make sure we have already visited this BB, and that it is thus
3527 initialized.
3528
3529 If e->src->aux is NULL, this predecessor is actually on an
3530 enclosing transaction. We only care about the current
3531 transaction, so ignore it. */
3532 if (e->src->aux && BB_VISITED_P (e->src))
3533 {
3534 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3535 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3536 break;
3537 }
3538 }
3539
3540 for (; ix < EDGE_COUNT (bb->preds); ix++)
3541 {
3542 e = EDGE_PRED (bb, ix);
3543 if (e->src->aux && BB_VISITED_P (e->src))
3544 {
3545 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3546 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3547 }
3548 }
3549
3550 BB_VISITED_P (bb) = true;
3551 }
3552
3553 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3554
3555 static void
3556 tm_memopt_compute_antin (basic_block bb)
3557 {
3558 edge e;
3559 unsigned ix;
3560
3561 /* Seed with the ANTIC_OUT of any successor. */
3562 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3563 {
3564 e = EDGE_SUCC (bb, ix);
3565 /* Make sure we have already visited this BB, and that it is thus
3566 initialized. */
3567 if (BB_VISITED_P (e->dest))
3568 {
3569 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3570 break;
3571 }
3572 }
3573
3574 for (; ix < EDGE_COUNT (bb->succs); ix++)
3575 {
3576 e = EDGE_SUCC (bb, ix);
3577 if (BB_VISITED_P (e->dest))
3578 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3579 }
3580
3581 BB_VISITED_P (bb) = true;
3582 }
3583
3584 /* Compute the AVAIL sets for every basic block in BLOCKS.
3585
3586 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3587
3588 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3589 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3590
3591 This is basically what we do in lcm's compute_available(), but here
3592 we calculate two sets of sets (one for STOREs and one for READs),
3593 and we work on a region instead of the entire CFG.
3594
3595 REGION is the TM region.
3596 BLOCKS are the basic blocks in the region. */
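/* For example (a small worked instance of the equations above): if BB has
   two predecessors whose STORE_AVAIL_OUT sets are {a, b} and {a, c}, then
   STORE_AVAIL_IN[BB] = {a} and STORE_AVAIL_OUT[BB] is {a} unioned with
   STORE_LOCAL[BB].  */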
3597
3598 static void
3599 tm_memopt_compute_available (struct tm_region *region,
3600 vec<basic_block> blocks)
3601 {
3602 edge e;
3603 basic_block *worklist, *qin, *qout, *qend, bb;
3604 unsigned int qlen, i;
3605 edge_iterator ei;
3606 bool changed;
3607
3608 /* Allocate a worklist array/queue. Entries are only added to the
3609 list if they were not already on the list. So the size is
3610 bounded by the number of basic blocks in the region. */
3611 qlen = blocks.length () - 1;
3612 qin = qout = worklist =
3613 XNEWVEC (basic_block, qlen);
3614
3615 /* Put every block in the region on the worklist. */
3616 for (i = 0; blocks.iterate (i, &bb); ++i)
3617 {
3618 /* Seed AVAIL_OUT with the LOCAL set. */
3619 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3620 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3621
3622 AVAIL_IN_WORKLIST_P (bb) = true;
3623 /* No need to insert the entry block, since it has an AVIN of
3624 null, and an AVOUT that has already been seeded in. */
3625 if (bb != region->entry_block)
3626 *qin++ = bb;
3627 }
3628
3629 /* The entry block has been initialized with the local sets. */
3630 BB_VISITED_P (region->entry_block) = true;
3631
3632 qin = worklist;
3633 qend = &worklist[qlen];
3634
3635 /* Iterate until the worklist is empty. */
3636 while (qlen)
3637 {
3638 /* Take the first entry off the worklist. */
3639 bb = *qout++;
3640 qlen--;
3641
3642 if (qout >= qend)
3643 qout = worklist;
3644
3645 /* This block can be added to the worklist again if necessary. */
3646 AVAIL_IN_WORKLIST_P (bb) = false;
3647 tm_memopt_compute_avin (bb);
3648
3649 /* Note: We do not add the LOCAL sets here because we already
3650 seeded the AVAIL_OUT sets with them. */
3651 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3652 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3653 if (changed
3654 && (region->exit_blocks == NULL
3655 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3656 /* If the out state of this block changed, then we need to add
3657 its successors to the worklist if they are not already in. */
3658 FOR_EACH_EDGE (e, ei, bb->succs)
3659 if (!AVAIL_IN_WORKLIST_P (e->dest)
3660 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3661 {
3662 *qin++ = e->dest;
3663 AVAIL_IN_WORKLIST_P (e->dest) = true;
3664 qlen++;
3665
3666 if (qin >= qend)
3667 qin = worklist;
3668 }
3669 }
3670
3671 free (worklist);
3672
3673 if (dump_file)
3674 dump_tm_memopt_sets (blocks);
3675 }
3676
3677 /* Compute ANTIC sets for every basic block in BLOCKS.
3678
3679 We compute STORE_ANTIC_OUT as follows:
3680
3681 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3682 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3683
3684 REGION is the TM region.
3685 BLOCKS are the basic blocks in the region. */
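/* For example (a small worked instance of the equations above): if BB has
   two successors whose STORE_ANTIC_OUT sets are {a, b} and {b, c}, then
   STORE_ANTIC_IN[BB] = {b} and STORE_ANTIC_OUT[BB] is {b} unioned with
   STORE_LOCAL[BB].  */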
3686
3687 static void
3688 tm_memopt_compute_antic (struct tm_region *region,
3689 vec<basic_block> blocks)
3690 {
3691 edge e;
3692 basic_block *worklist, *qin, *qout, *qend, bb;
3693 unsigned int qlen;
3694 int i;
3695 edge_iterator ei;
3696
3697 /* Allocate a worklist array/queue. Entries are only added to the
3698 list if they were not already on the list. So the size is
3699 bounded by the number of basic blocks in the region. */
3700 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3701
3702 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3703 {
3704 bb = blocks[i];
3705
3706 /* Seed ANTIC_OUT with the LOCAL set. */
3707 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3708
3709 /* Put every block in the region on the worklist. */
3710 AVAIL_IN_WORKLIST_P (bb) = true;
3711 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3712 and their ANTIC_OUT has already been seeded in. */
3713 if (region->exit_blocks
3714 && !bitmap_bit_p (region->exit_blocks, bb->index))
3715 {
3716 qlen++;
3717 *qin++ = bb;
3718 }
3719 }
3720
3721 /* The exit blocks have been initialized with the local sets. */
3722 if (region->exit_blocks)
3723 {
3724 unsigned int i;
3725 bitmap_iterator bi;
3726 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3727 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
3728 }
3729
3730 qin = worklist;
3731 qend = &worklist[qlen];
3732
3733 /* Iterate until the worklist is empty. */
3734 while (qlen)
3735 {
3736 /* Take the first entry off the worklist. */
3737 bb = *qout++;
3738 qlen--;
3739
3740 if (qout >= qend)
3741 qout = worklist;
3742
3743 /* This block can be added to the worklist again if necessary. */
3744 AVAIL_IN_WORKLIST_P (bb) = false;
3745 tm_memopt_compute_antin (bb);
3746
3747 /* Note: We do not add the LOCAL sets here because we already
3748 seeded the ANTIC_OUT sets with them. */
3749 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3750 && bb != region->entry_block)
3751 /* If the out state of this block changed, then we need to add
3752 its predecessors to the worklist if they are not already in. */
3753 FOR_EACH_EDGE (e, ei, bb->preds)
3754 if (!AVAIL_IN_WORKLIST_P (e->src))
3755 {
3756 *qin++ = e->src;
3757 AVAIL_IN_WORKLIST_P (e->src) = true;
3758 qlen++;
3759
3760 if (qin >= qend)
3761 qin = worklist;
3762 }
3763 }
3764
3765 free (worklist);
3766
3767 if (dump_file)
3768 dump_tm_memopt_sets (blocks);
3769 }
3770
3771 /* Offsets of load variants from TM_LOAD. For example,
3772 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3773 See gtm-builtins.def. */
3774 #define TRANSFORM_RAR 1
3775 #define TRANSFORM_RAW 2
3776 #define TRANSFORM_RFW 3
3777 /* Offsets of store variants from TM_STORE. */
3778 #define TRANSFORM_WAR 1
3779 #define TRANSFORM_WAW 2
3780
3781 /* Inform about a load/store optimization. */
3782
3783 static void
3784 dump_tm_memopt_transform (gimple stmt)
3785 {
3786 if (dump_file)
3787 {
3788 fprintf (dump_file, "TM memopt: transforming: ");
3789 print_gimple_stmt (dump_file, stmt, 0, 0);
3790 fprintf (dump_file, "\n");
3791 }
3792 }
3793
3794 /* Perform a read/write optimization: replace the TM builtin in STMT
3795    with the builtin that is OFFSET entries down in the builtins table
3796    in gtm-builtins.def.  */
3797
3798 static void
3799 tm_memopt_transform_stmt (unsigned int offset,
3800 gcall *stmt,
3801 gimple_stmt_iterator *gsi)
3802 {
3803 tree fn = gimple_call_fn (stmt);
3804 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3805 TREE_OPERAND (fn, 0)
3806 = builtin_decl_explicit ((enum built_in_function)
3807 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3808 + offset));
3809 gimple_call_set_fn (stmt, fn);
3810 gsi_replace (gsi, stmt, true);
3811 dump_tm_memopt_transform (stmt);
3812 }
3813
3814 /* Perform the actual TM memory optimization transformations in the
3815 basic blocks in BLOCKS. */
3816
3817 static void
3818 tm_memopt_transform_blocks (vec<basic_block> blocks)
3819 {
3820 size_t i;
3821 basic_block bb;
3822 gimple_stmt_iterator gsi;
3823
3824 for (i = 0; blocks.iterate (i, &bb); ++i)
3825 {
3826 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3827 {
3828 gimple stmt = gsi_stmt (gsi);
3829 bitmap read_avail = READ_AVAIL_IN (bb);
3830 bitmap store_avail = STORE_AVAIL_IN (bb);
3831 bitmap store_antic = STORE_ANTIC_OUT (bb);
3832 unsigned int loc;
3833
3834 if (is_tm_simple_load (stmt))
3835 {
3836 gcall *call_stmt = as_a <gcall *> (stmt);
3837 loc = tm_memopt_value_number (stmt, NO_INSERT);
3838 if (store_avail && bitmap_bit_p (store_avail, loc))
3839 tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
3840 else if (store_antic && bitmap_bit_p (store_antic, loc))
3841 {
3842 tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
3843 bitmap_set_bit (store_avail, loc);
3844 }
3845 else if (read_avail && bitmap_bit_p (read_avail, loc))
3846 tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
3847 else
3848 bitmap_set_bit (read_avail, loc);
3849 }
3850 else if (is_tm_simple_store (stmt))
3851 {
3852 gcall *call_stmt = as_a <gcall *> (stmt);
3853 loc = tm_memopt_value_number (stmt, NO_INSERT);
3854 if (store_avail && bitmap_bit_p (store_avail, loc))
3855 tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
3856 else
3857 {
3858 if (read_avail && bitmap_bit_p (read_avail, loc))
3859 tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
3860 bitmap_set_bit (store_avail, loc);
3861 }
3862 }
3863 }
3864 }
3865 }
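
/* A sketch of the effect of the transformation above (illustrative only;
   x and y denote transaction-protected memory):

     __transaction_atomic {
       x = 1;         // TM store to x;  x becomes "store available"
       tmp1 = x;      // TM load of x:   store available -> read-after-write
       tmp2 = y;      // TM load of y;   y becomes "read available"
       tmp3 = y;      // TM load of y:   read available  -> read-after-read
     }

   The second access to each location is rewritten to call the RaW/RaR
   variant of the TM load builtin, which lets the TM runtime avoid
   redundant work.  */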
3866
3867 /* Return a new set of bitmaps for a BB. */
3868
3869 static struct tm_memopt_bitmaps *
3870 tm_memopt_init_sets (void)
3871 {
3872 struct tm_memopt_bitmaps *b
3873 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3874 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3875 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3876 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3877 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3879 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3880 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3881 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3882 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3883 return b;
3884 }
3885
3886 /* Free sets computed for each BB. */
3887
3888 static void
3889 tm_memopt_free_sets (vec<basic_block> blocks)
3890 {
3891 size_t i;
3892 basic_block bb;
3893
3894 for (i = 0; blocks.iterate (i, &bb); ++i)
3895 bb->aux = NULL;
3896 }
3897
3898 /* Clear the visited bit for every basic block in BLOCKS. */
3899
3900 static void
3901 tm_memopt_clear_visited (vec<basic_block> blocks)
3902 {
3903 size_t i;
3904 basic_block bb;
3905
3906 for (i = 0; blocks.iterate (i, &bb); ++i)
3907 BB_VISITED_P (bb) = false;
3908 }
3909
3910 /* Replace TM loads/stores with hints for the runtime.  We handle
3911 things like read-after-write, write-after-read, read-after-read,
3912 read-for-write, etc. */
3913
3914 static unsigned int
3915 execute_tm_memopt (void)
3916 {
3917 struct tm_region *region;
3918 vec<basic_block> bbs;
3919
3920 tm_memopt_value_id = 0;
3921 tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
3922
3923 for (region = all_tm_regions; region; region = region->next)
3924 {
3925 /* All the TM stores/loads in the current region. */
3926 size_t i;
3927 basic_block bb;
3928
3929 bitmap_obstack_initialize (&tm_memopt_obstack);
3930
3931 /* Save all BBs for the current region. */
3932 bbs = get_tm_region_blocks (region->entry_block,
3933 region->exit_blocks,
3934 region->irr_blocks,
3935 NULL,
3936 false);
3937
3938 /* Collect all the memory operations. */
3939 for (i = 0; bbs.iterate (i, &bb); ++i)
3940 {
3941 bb->aux = tm_memopt_init_sets ();
3942 tm_memopt_accumulate_memops (bb);
3943 }
3944
3945 /* Solve data flow equations and transform each block accordingly. */
3946 tm_memopt_clear_visited (bbs);
3947 tm_memopt_compute_available (region, bbs);
3948 tm_memopt_clear_visited (bbs);
3949 tm_memopt_compute_antic (region, bbs);
3950 tm_memopt_transform_blocks (bbs);
3951
3952 tm_memopt_free_sets (bbs);
3953 bbs.release ();
3954 bitmap_obstack_release (&tm_memopt_obstack);
3955 tm_memopt_value_numbers->empty ();
3956 }
3957
3958 delete tm_memopt_value_numbers;
3959 tm_memopt_value_numbers = NULL;
3960 return 0;
3961 }
3962
3963 namespace {
3964
3965 const pass_data pass_data_tm_memopt =
3966 {
3967 GIMPLE_PASS, /* type */
3968 "tmmemopt", /* name */
3969 OPTGROUP_NONE, /* optinfo_flags */
3970 TV_TRANS_MEM, /* tv_id */
3971 ( PROP_ssa | PROP_cfg ), /* properties_required */
3972 0, /* properties_provided */
3973 0, /* properties_destroyed */
3974 0, /* todo_flags_start */
3975 0, /* todo_flags_finish */
3976 };
3977
3978 class pass_tm_memopt : public gimple_opt_pass
3979 {
3980 public:
3981 pass_tm_memopt (gcc::context *ctxt)
3982 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
3983 {}
3984
3985 /* opt_pass methods: */
3986 virtual bool gate (function *) { return flag_tm && optimize > 0; }
3987 virtual unsigned int execute (function *) { return execute_tm_memopt (); }
3988
3989 }; // class pass_tm_memopt
3990
3991 } // anon namespace
3992
3993 gimple_opt_pass *
3994 make_pass_tm_memopt (gcc::context *ctxt)
3995 {
3996 return new pass_tm_memopt (ctxt);
3997 }
3998
3999 \f
4000 /* Interprocedural analysis for the creation of transactional clones.
4001 The aim of this pass is to find which functions are referenced in
4002 a non-irrevocable transaction context, and for those over which
4003 we have control (or user directive), create a version of the
4004 function which uses only the transactional interface to reference
4005 protected memories. This analysis proceeds in several steps:
4006
4007 (1) Collect the set of all possible transactional clones:
4008
4009     (a) For all local public functions marked tm_callable, push
4010	 them onto the tm_callee queue.
4011
4012 (b) For all local functions, scan for calls in transaction blocks.
4013 Push the caller and callee onto the tm_caller and tm_callee
4014 queues. Count the number of callers for each callee.
4015
4016 (c) For each local function on the callee list, assume we will
4017 create a transactional clone. Push *all* calls onto the
4018 callee queues; count the number of clone callers separately
4019	 from the number of original callers.
4020
4021 (2) Propagate irrevocable status up the dominator tree:
4022
4023 (a) Any external function on the callee list that is not marked
4024 tm_callable is irrevocable. Push all callers of such onto
4025 a worklist.
4026
4027 (b) For each function on the worklist, mark each block that
4028 contains an irrevocable call. Use the AND operator to
4029 propagate that mark up the dominator tree.
4030
4031 (c) If we reach the entry block for a possible transactional
4032 clone, then the transactional clone is irrevocable, and
4033 we should not create the clone after all. Push all
4034 callers onto the worklist.
4035
4036 (d) Place tm_irrevocable calls at the beginning of the relevant
4037	 blocks.  A special case is the entry block for the entire
4038 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4039 the library to begin the region in serial mode. Decrement
4040 the call count for all callees in the irrevocable region.
4041
4042 (3) Create the transactional clones:
4043
4044 Any tm_callee that still has a non-zero call count is cloned.
4045 */
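
/* For example (illustrative only):

     void g (void);                          // external, unmarked
     static void f (void) { g (); }
     void h (void) { __transaction_relaxed { f (); } }

   Step (1) queues f (called inside a transaction) and g (called from the
   prospective clone of f).  Step (2) finds g irrevocable because it is an
   external function not marked tm_callable; that propagates to the entry
   block of f's prospective clone, so no clone of f is created, and the
   transaction in h is marked GTMA_DOES_GO_IRREVOCABLE so the library can
   start it directly in serial-irrevocable mode.  */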
4046
4047 /* This structure is stored in the AUX field of each cgraph_node. */
4048 struct tm_ipa_cg_data
4049 {
4050 /* The clone of the function that got created. */
4051 struct cgraph_node *clone;
4052
4053 /* The tm regions in the normal function. */
4054 struct tm_region *all_tm_regions;
4055
4056 /* The blocks of the normal/clone functions that contain irrevocable
4057 calls, or blocks that are post-dominated by irrevocable calls. */
4058 bitmap irrevocable_blocks_normal;
4059 bitmap irrevocable_blocks_clone;
4060
4061 /* The blocks of the normal function that are involved in transactions. */
4062 bitmap transaction_blocks_normal;
4063
4064 /* The number of callers to the transactional clone of this function
4065 from normal and transactional clones respectively. */
4066 unsigned tm_callers_normal;
4067 unsigned tm_callers_clone;
4068
4069 /* True if all calls to this function's transactional clone
4070 are irrevocable. Also automatically true if the function
4071 has no transactional clone. */
4072 bool is_irrevocable;
4073
4074 /* Flags indicating the presence of this function in various queues. */
4075 bool in_callee_queue;
4076 bool in_worklist;
4077
4078 /* Flags indicating the kind of scan desired while in the worklist. */
4079 bool want_irr_scan_normal;
4080 };
4081
4082 typedef vec<cgraph_node *> cgraph_node_queue;
4083
4084 /* Return the ipa data associated with NODE, allocating zeroed memory
4085 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4086 and set *NODE accordingly. */
4087
4088 static struct tm_ipa_cg_data *
4089 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
4090 {
4091 struct tm_ipa_cg_data *d;
4092
4093 if (traverse_aliases && (*node)->alias)
4094 *node = (*node)->get_alias_target ();
4095
4096 d = (struct tm_ipa_cg_data *) (*node)->aux;
4097
4098 if (d == NULL)
4099 {
4100 d = (struct tm_ipa_cg_data *)
4101 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
4102 (*node)->aux = (void *) d;
4103 memset (d, 0, sizeof (*d));
4104 }
4105
4106 return d;
4107 }
4108
4109 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4110 it is already present. */
4111
4112 static void
4113 maybe_push_queue (struct cgraph_node *node,
4114 cgraph_node_queue *queue_p, bool *in_queue_p)
4115 {
4116 if (!*in_queue_p)
4117 {
4118 *in_queue_p = true;
4119 queue_p->safe_push (node);
4120 }
4121 }
4122
4123 /* Duplicate the basic blocks in QUEUE for use in the uninstrumented
4124 code path. QUEUE are the basic blocks inside the transaction
4125 represented in REGION.
4126
4127 Later in split_code_paths() we will add the conditional to choose
4128 between the two alternatives. */
4129
4130 static void
4131 ipa_uninstrument_transaction (struct tm_region *region,
4132 vec<basic_block> queue)
4133 {
4134 gimple transaction = region->transaction_stmt;
4135 basic_block transaction_bb = gimple_bb (transaction);
4136 int n = queue.length ();
4137 basic_block *new_bbs = XNEWVEC (basic_block, n);
4138
4139 copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
4140 true);
4141 edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
4142 add_phi_args_after_copy (new_bbs, n, e);
4143
4144   // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it.
4145 // a) EDGE_FALLTHRU into the transaction
4146 // b) EDGE_TM_ABORT out of the transaction
4147 // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.
4148
4149 free (new_bbs);
4150 }
4151
4152 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4153 Queue all callees within block BB. */
4154
4155 static void
4156 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4157 basic_block bb, bool for_clone)
4158 {
4159 gimple_stmt_iterator gsi;
4160
4161 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4162 {
4163 gimple stmt = gsi_stmt (gsi);
4164 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4165 {
4166 tree fndecl = gimple_call_fndecl (stmt);
4167 if (fndecl)
4168 {
4169 struct tm_ipa_cg_data *d;
4170 unsigned *pcallers;
4171 struct cgraph_node *node;
4172
4173 if (is_tm_ending_fndecl (fndecl))
4174 continue;
4175 if (find_tm_replacement_function (fndecl))
4176 continue;
4177
4178 node = cgraph_node::get (fndecl);
4179 gcc_assert (node != NULL);
4180 d = get_cg_data (&node, true);
4181
4182 pcallers = (for_clone ? &d->tm_callers_clone
4183 : &d->tm_callers_normal);
4184 *pcallers += 1;
4185
4186 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4187 }
4188 }
4189 }
4190 }
4191
4192 /* Scan all calls in NODE that are within a transaction region,
4193 and push the resulting nodes into the callee queue. */
4194
4195 static void
4196 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4197 cgraph_node_queue *callees_p)
4198 {
4199 struct tm_region *r;
4200
4201 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4202 d->all_tm_regions = all_tm_regions;
4203
4204 for (r = all_tm_regions; r; r = r->next)
4205 {
4206 vec<basic_block> bbs;
4207 basic_block bb;
4208 unsigned i;
4209
4210 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4211 d->transaction_blocks_normal, false);
4212
4213 // Generate the uninstrumented code path for this transaction.
4214 ipa_uninstrument_transaction (r, bbs);
4215
4216 FOR_EACH_VEC_ELT (bbs, i, bb)
4217 ipa_tm_scan_calls_block (callees_p, bb, false);
4218
4219 bbs.release ();
4220 }
4221
4222 // ??? copy_bbs should maintain cgraph edges for the blocks as it is
4223 // copying them, rather than forcing us to do this externally.
4224 cgraph_edge::rebuild_edges ();
4225
4226 // ??? In ipa_uninstrument_transaction we don't try to update dominators
4227 // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
4228 // Instead, just release dominators here so update_ssa recomputes them.
4229 free_dominance_info (CDI_DOMINATORS);
4230
4231 // When building the uninstrumented code path, copy_bbs will have invoked
4232 // create_new_def_for starting an "ssa update context". There is only one
4233 // instance of this context, so resolve ssa updates before moving on to
4234 // the next function.
4235 update_ssa (TODO_update_ssa);
4236 }
4237
4238 /* Scan all calls in NODE as if this is the transactional clone,
4239 and push the destinations into the callee queue. */
4240
4241 static void
4242 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4243 cgraph_node_queue *callees_p)
4244 {
4245 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
4246 basic_block bb;
4247
4248 FOR_EACH_BB_FN (bb, fn)
4249 ipa_tm_scan_calls_block (callees_p, bb, true);
4250 }
4251
4252 /* The function NODE has been detected to be irrevocable. Push all
4253 of its callers onto WORKLIST for the purpose of re-scanning them. */
4254
4255 static void
4256 ipa_tm_note_irrevocable (struct cgraph_node *node,
4257 cgraph_node_queue *worklist_p)
4258 {
4259 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4260 struct cgraph_edge *e;
4261
4262 d->is_irrevocable = true;
4263
4264 for (e = node->callers; e ; e = e->next_caller)
4265 {
4266 basic_block bb;
4267 struct cgraph_node *caller;
4268
4269 /* Don't examine recursive calls. */
4270 if (e->caller == node)
4271 continue;
4272 /* Even if we think we can go irrevocable, believe the user
4273 above all. */
4274 if (is_tm_safe_or_pure (e->caller->decl))
4275 continue;
4276
4277 caller = e->caller;
4278 d = get_cg_data (&caller, true);
4279
4280       /* Check if the call site lies within a transactional region of the caller.
4281	 If so, schedule the caller for a normal (non-clone) re-scan as well.  */
4282 bb = gimple_bb (e->call_stmt);
4283 gcc_assert (bb != NULL);
4284 if (d->transaction_blocks_normal
4285 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4286 d->want_irr_scan_normal = true;
4287
4288 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4289 }
4290 }
4291
4292 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4293 within the block is irrevocable. */
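
/* Examples of statements treated as irrevocable here: loads from or stores
   to volatile memory, inline asm (no __tm_waiver wrapper is implemented
   yet), and calls to functions that is_tm_irrevocable accepts or whose
   bodies have already been found to be irrevocable, unless the user marked
   them transaction_safe or transaction_pure.  */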
4294
4295 static bool
4296 ipa_tm_scan_irr_block (basic_block bb)
4297 {
4298 gimple_stmt_iterator gsi;
4299 tree fn;
4300
4301 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4302 {
4303 gimple stmt = gsi_stmt (gsi);
4304 switch (gimple_code (stmt))
4305 {
4306 case GIMPLE_ASSIGN:
4307 if (gimple_assign_single_p (stmt))
4308 {
4309 tree lhs = gimple_assign_lhs (stmt);
4310 tree rhs = gimple_assign_rhs1 (stmt);
4311 if (volatile_var_p (lhs) || volatile_var_p (rhs))
4312 return true;
4313 }
4314 break;
4315
4316 case GIMPLE_CALL:
4317 {
4318 tree lhs = gimple_call_lhs (stmt);
4319 if (lhs && volatile_var_p (lhs))
4320 return true;
4321
4322 if (is_tm_pure_call (stmt))
4323 break;
4324
4325 fn = gimple_call_fn (stmt);
4326
4327 /* Functions with the attribute are by definition irrevocable. */
4328 if (is_tm_irrevocable (fn))
4329 return true;
4330
4331 /* For direct function calls, go ahead and check for replacement
4332 functions, or transitive irrevocable functions. For indirect
4333 functions, we'll ask the runtime. */
4334 if (TREE_CODE (fn) == ADDR_EXPR)
4335 {
4336 struct tm_ipa_cg_data *d;
4337 struct cgraph_node *node;
4338
4339 fn = TREE_OPERAND (fn, 0);
4340 if (is_tm_ending_fndecl (fn))
4341 break;
4342 if (find_tm_replacement_function (fn))
4343 break;
4344
4345 node = cgraph_node::get (fn);
4346 d = get_cg_data (&node, true);
4347
4348 /* Return true if irrevocable, but above all, believe
4349 the user. */
4350 if (d->is_irrevocable
4351 && !is_tm_safe_or_pure (fn))
4352 return true;
4353 }
4354 break;
4355 }
4356
4357 case GIMPLE_ASM:
4358 /* ??? The Approved Method of indicating that an inline
4359 assembly statement is not relevant to the transaction
4360 is to wrap it in a __tm_waiver block. This is not
4361 yet implemented, so we can't check for it. */
4362 if (is_tm_safe (current_function_decl))
4363 {
4364 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4365 SET_EXPR_LOCATION (t, gimple_location (stmt));
4366 error ("%Kasm not allowed in %<transaction_safe%> function", t);
4367 }
4368 return true;
4369
4370 default:
4371 break;
4372 }
4373 }
4374
4375 return false;
4376 }
4377
4378 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
4379 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4380 scanning past OLD_IRR or EXIT_BLOCKS. */
4381
4382 static bool
4383 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4384 bitmap old_irr, bitmap exit_blocks)
4385 {
4386 bool any_new_irr = false;
4387 edge e;
4388 edge_iterator ei;
4389 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4390
4391 do
4392 {
4393 basic_block bb = pqueue->pop ();
4394
4395 /* Don't re-scan blocks we know already are irrevocable. */
4396 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4397 continue;
4398
4399 if (ipa_tm_scan_irr_block (bb))
4400 {
4401 bitmap_set_bit (new_irr, bb->index);
4402 any_new_irr = true;
4403 }
4404 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4405 {
4406 FOR_EACH_EDGE (e, ei, bb->succs)
4407 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4408 {
4409 bitmap_set_bit (visited_blocks, e->dest->index);
4410 pqueue->safe_push (e->dest);
4411 }
4412 }
4413 }
4414 while (!pqueue->is_empty ());
4415
4416 BITMAP_FREE (visited_blocks);
4417
4418 return any_new_irr;
4419 }
4420
4421 /* Propagate the irrevocable property both up and down the dominator tree.
4422    ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4423    are the exit blocks of the TM region; OLD_IRR are the results of a previous
4424    scan of the dominator tree which have been fully propagated; NEW_IRR is the
4425    set of new blocks gaining the irrevocable property during the current scan.  */
4426
4427 static void
4428 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4429 bitmap old_irr, bitmap exit_blocks)
4430 {
4431 vec<basic_block> bbs;
4432 bitmap all_region_blocks;
4433
4434 /* If this block is in the old set, no need to rescan. */
4435 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4436 return;
4437
4438 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4439 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4440 all_region_blocks, false);
4441 do
4442 {
4443 basic_block bb = bbs.pop ();
4444 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4445 bool all_son_irr = false;
4446 edge_iterator ei;
4447 edge e;
4448
4449	  /* Propagate up: if all of this block's successors are irrevocable,
4450	     then this block is too (but there must be at least one successor).  */
4451 if (!this_irr)
4452 {
4453 FOR_EACH_EDGE (e, ei, bb->succs)
4454 {
4455 if (!bitmap_bit_p (new_irr, e->dest->index))
4456 {
4457 all_son_irr = false;
4458 break;
4459 }
4460 else
4461 all_son_irr = true;
4462 }
4463 if (all_son_irr)
4464 {
4465 /* Add block to new_irr if it hasn't already been processed. */
4466 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4467 {
4468 bitmap_set_bit (new_irr, bb->index);
4469 this_irr = true;
4470 }
4471 }
4472 }
4473
4474 /* Propagate down to everyone we immediately dominate. */
4475 if (this_irr)
4476 {
4477 basic_block son;
4478 for (son = first_dom_son (CDI_DOMINATORS, bb);
4479 son;
4480 son = next_dom_son (CDI_DOMINATORS, son))
4481 {
4482 /* Make sure block is actually in a TM region, and it
4483 isn't already in old_irr. */
4484 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4485 && bitmap_bit_p (all_region_blocks, son->index))
4486 bitmap_set_bit (new_irr, son->index);
4487 }
4488 }
4489 }
4490 while (!bbs.is_empty ());
4491
4492 BITMAP_FREE (all_region_blocks);
4493 bbs.release ();
4494 }
4495
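/* Decrement the recorded number of transactional callers (the clone count
   or the normal count, as selected by FOR_CLONE) for every relevant call
   in basic block BB.  This is used for blocks that have just been marked
   irrevocable, whose calls will no longer be redirected to transactional
   clones.  */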
4496 static void
4497 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4498 {
4499 gimple_stmt_iterator gsi;
4500
4501 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4502 {
4503 gimple stmt = gsi_stmt (gsi);
4504 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4505 {
4506 tree fndecl = gimple_call_fndecl (stmt);
4507 if (fndecl)
4508 {
4509 struct tm_ipa_cg_data *d;
4510 unsigned *pcallers;
4511 struct cgraph_node *tnode;
4512
4513 if (is_tm_ending_fndecl (fndecl))
4514 continue;
4515 if (find_tm_replacement_function (fndecl))
4516 continue;
4517
4518 tnode = cgraph_node::get (fndecl);
4519 d = get_cg_data (&tnode, true);
4520
4521 pcallers = (for_clone ? &d->tm_callers_clone
4522 : &d->tm_callers_normal);
4523
4524 gcc_assert (*pcallers > 0);
4525 *pcallers -= 1;
4526 }
4527 }
4528 }
4529 }
4530
4531 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4532 as well as other irrevocable actions such as inline assembly. Mark all
4533 such blocks as irrevocable and decrement the number of calls to
4534 transactional clones. Return true if, for the transactional clone, the
4535 entire function is irrevocable. */
4536
4537 static bool
4538 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4539 {
4540 struct tm_ipa_cg_data *d;
4541 bitmap new_irr, old_irr;
4542 bool ret = false;
4543
4544 /* Builtin operators (operator new, and such). */
4545 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4546 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
4547 return false;
4548
4549 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4550 calculate_dominance_info (CDI_DOMINATORS);
4551
4552 d = get_cg_data (&node, true);
4553 auto_vec<basic_block, 10> queue;
4554 new_irr = BITMAP_ALLOC (&tm_obstack);
4555
4556 /* Scan each tm region, propagating irrevocable status through the tree. */
4557 if (for_clone)
4558 {
4559 old_irr = d->irrevocable_blocks_clone;
4560 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4561 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4562 {
4563 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4564 new_irr,
4565 old_irr, NULL);
4566 ret = bitmap_bit_p (new_irr,
4567 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
4568 }
4569 }
4570 else
4571 {
4572 struct tm_region *region;
4573
4574 old_irr = d->irrevocable_blocks_normal;
4575 for (region = d->all_tm_regions; region; region = region->next)
4576 {
4577 queue.quick_push (region->entry_block);
4578 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4579 region->exit_blocks))
4580 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4581 region->exit_blocks);
4582 }
4583 }
4584
4585 /* If we found any new irrevocable blocks, reduce the call count for
4586 transactional clones within the irrevocable blocks. Save the new
4587 set of irrevocable blocks for next time. */
4588 if (!bitmap_empty_p (new_irr))
4589 {
4590 bitmap_iterator bmi;
4591 unsigned i;
4592
4593 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4594 ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
4595 for_clone);
4596
4597 if (old_irr)
4598 {
4599 bitmap_ior_into (old_irr, new_irr);
4600 BITMAP_FREE (new_irr);
4601 }
4602 else if (for_clone)
4603 d->irrevocable_blocks_clone = new_irr;
4604 else
4605 d->irrevocable_blocks_normal = new_irr;
4606
4607 if (dump_file && new_irr)
4608 {
4609 const char *dname;
4610 bitmap_iterator bmi;
4611 unsigned i;
4612
4613 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4614 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4615 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4616 }
4617 }
4618 else
4619 BITMAP_FREE (new_irr);
4620
4621 pop_cfun ();
4622
4623 return ret;
4624 }
4625
4626 /* Return true if, for the transactional clone of NODE, any call
4627 may enter irrevocable mode. */
4628
4629 static bool
4630 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4631 {
4632 struct tm_ipa_cg_data *d;
4633 tree decl;
4634 unsigned flags;
4635
4636 d = get_cg_data (&node, true);
4637 decl = node->decl;
4638 flags = flags_from_decl_or_type (decl);
4639
4640 /* Handle some TM builtins. Ordinarily these aren't actually generated
4641 at this point, but handling these functions when written in by the
4642 user makes it easier to build unit tests. */
4643 if (flags & ECF_TM_BUILTIN)
4644 return false;
4645
4646 /* Filter out all functions that are marked. */
4647 if (flags & ECF_TM_PURE)
4648 return false;
4649 if (is_tm_safe (decl))
4650 return false;
4651 if (is_tm_irrevocable (decl))
4652 return true;
4653 if (is_tm_callable (decl))
4654 return true;
4655 if (find_tm_replacement_function (decl))
4656 return true;
4657
4658 /* If we aren't seeing the final version of the function we don't
4659 know what it will contain at runtime. */
4660 if (node->get_availability () < AVAIL_AVAILABLE)
4661 return true;
4662
4663 /* If the function must go irrevocable, then of course true. */
4664 if (d->is_irrevocable)
4665 return true;
4666
4667 /* If there are any blocks marked irrevocable, then the function
4668 as a whole may enter irrevocable. */
4669 if (d->irrevocable_blocks_clone)
4670 return true;
4671
4672 /* We may have previously marked this function as tm_may_enter_irr;
4673 see pass_diagnose_tm_blocks. */
4674 if (node->local.tm_may_enter_irr)
4675 return true;
4676
4677 /* Recurse on the main body for aliases. In general, this will
4678 result in one of the bits above being set so that we will not
4679 have to recurse next time. */
4680 if (node->alias)
4681 return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
4682
4683   /* What remains are unmarked local functions with nothing that forces
4684      them to go irrevocable.  */
4685 return false;
4686 }
4687
4688 /* Diagnose calls from transaction_safe functions to unmarked
4689 functions that are determined to not be safe. */
4690
4691 static void
4692 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4693 {
4694 struct cgraph_edge *e;
4695
4696 for (e = node->callees; e ; e = e->next_callee)
4697 if (!is_tm_callable (e->callee->decl)
4698 && e->callee->local.tm_may_enter_irr)
4699 error_at (gimple_location (e->call_stmt),
4700 "unsafe function call %qD within "
4701 "%<transaction_safe%> function", e->callee->decl);
4702 }
4703
4704 /* Diagnose calls from atomic transactions to unmarked functions
4705 that are determined to not be safe. */
4706
4707 static void
4708 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4709 struct tm_region *all_tm_regions)
4710 {
4711 struct tm_region *r;
4712
4713 for (r = all_tm_regions; r ; r = r->next)
4714 if (gimple_transaction_subcode (r->get_transaction_stmt ())
4715 & GTMA_IS_RELAXED)
4716 {
4717 /* Atomic transactions can be nested inside relaxed. */
4718 if (r->inner)
4719 ipa_tm_diagnose_transaction (node, r->inner);
4720 }
4721 else
4722 {
4723 vec<basic_block> bbs;
4724 gimple_stmt_iterator gsi;
4725 basic_block bb;
4726 size_t i;
4727
4728 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4729 r->irr_blocks, NULL, false);
4730
4731 for (i = 0; bbs.iterate (i, &bb); ++i)
4732 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4733 {
4734 gimple stmt = gsi_stmt (gsi);
4735 tree fndecl;
4736
4737 if (gimple_code (stmt) == GIMPLE_ASM)
4738 {
4739 error_at (gimple_location (stmt),
4740 "asm not allowed in atomic transaction");
4741 continue;
4742 }
4743
4744 if (!is_gimple_call (stmt))
4745 continue;
4746 fndecl = gimple_call_fndecl (stmt);
4747
4748 /* Indirect function calls have been diagnosed already. */
4749 if (!fndecl)
4750 continue;
4751
4752 /* Stop at the end of the transaction. */
4753 if (is_tm_ending_fndecl (fndecl))
4754 {
4755 if (bitmap_bit_p (r->exit_blocks, bb->index))
4756 break;
4757 continue;
4758 }
4759
4760 /* Marked functions have been diagnosed already. */
4761 if (is_tm_pure_call (stmt))
4762 continue;
4763 if (is_tm_callable (fndecl))
4764 continue;
4765
4766 if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
4767 error_at (gimple_location (stmt),
4768 "unsafe function call %qD within "
4769 "atomic transaction", fndecl);
4770 }
4771
4772 bbs.release ();
4773 }
4774 }
4775
4776 /* Return a transactional mangled name for the assembler name identifier
4777    OLD_ASM_ID.  The result is a new IDENTIFIER_NODE; the caller need not
4778    free anything.  */
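
/* For example: a plain C identifier "foo" (length 3) is mangled to
   "_ZGTt3foo", while an already-mangled C++ name such as "_Z3foov"
   becomes "_ZGTt3foov", i.e. the transaction-clone marker is spliced
   in right after the "_Z" prefix.  */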
4779
4780 static tree
4781 tm_mangle (tree old_asm_id)
4782 {
4783 const char *old_asm_name;
4784 char *tm_name;
4785 void *alloc = NULL;
4786 struct demangle_component *dc;
4787 tree new_asm_id;
4788
4789 /* Determine if the symbol is already a valid C++ mangled name. Do this
4790 even for C, which might be interfacing with C++ code via appropriately
4791 ugly identifiers. */
4792 /* ??? We could probably do just as well checking for "_Z" and be done. */
4793 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4794 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4795
4796 if (dc == NULL)
4797 {
4798 char length[8];
4799
4800 do_unencoded:
4801 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4802 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4803 }
4804 else
4805 {
4806 old_asm_name += 2; /* Skip _Z */
4807
4808 switch (dc->type)
4809 {
4810 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4811 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4812 /* Don't play silly games, you! */
4813 goto do_unencoded;
4814
4815 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4816 /* I'd really like to know if we can ever be passed one of
4817 these from the C++ front end. The Logical Thing would
4818 seem that hidden-alias should be outer-most, so that we
4819 get hidden-alias of a transaction-clone and not vice-versa. */
4820 old_asm_name += 2;
4821 break;
4822
4823 default:
4824 break;
4825 }
4826
4827 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4828 }
4829 free (alloc);
4830
4831 new_asm_id = get_identifier (tm_name);
4832 free (tm_name);
4833
4834 return new_asm_id;
4835 }
4836
4837 static inline void
4838 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4839 {
4840 node->mark_force_output ();
4841 node->analyzed = true;
4842 }
4843
4844 static inline void
4845 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4846 {
4847 node->forced_by_abi = true;
4848 node->analyzed = true;
4849 }
4850
4851 /* Callback data for ipa_tm_create_version_alias. */
4852 struct create_version_alias_info
4853 {
4854 struct cgraph_node *old_node;
4855 tree new_decl;
4856 };
4857
4858 /* A subroutine of ipa_tm_create_version, called via
4859    cgraph_node::call_for_symbol_thunks_and_aliases.  Create new tm clones
4860    for each of the existing aliases.  */
4861 static bool
4862 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4863 {
4864 struct create_version_alias_info *info
4865 = (struct create_version_alias_info *)data;
4866 tree old_decl, new_decl, tm_name;
4867 struct cgraph_node *new_node;
4868
4869 if (!node->cpp_implicit_alias)
4870 return false;
4871
4872 old_decl = node->decl;
4873 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4874 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4875 TREE_CODE (old_decl), tm_name,
4876 TREE_TYPE (old_decl));
4877
4878 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4879 SET_DECL_RTL (new_decl, NULL);
4880
4881 /* Based loosely on C++'s make_alias_for(). */
4882 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4883 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4884 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4885 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4886 DECL_EXTERNAL (new_decl) = 0;
4887 DECL_ARTIFICIAL (new_decl) = 1;
4888 TREE_ADDRESSABLE (new_decl) = 1;
4889 TREE_USED (new_decl) = 1;
4890 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4891
4892 /* Perform the same remapping to the comdat group. */
4893 if (DECL_ONE_ONLY (new_decl))
4894 varpool_node::get (new_decl)->set_comdat_group
4895 (tm_mangle (decl_comdat_group_id (old_decl)));
4896
4897 new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
4898 new_node->tm_clone = true;
4899 new_node->externally_visible = info->old_node->externally_visible;
4900 new_node->no_reorder = info->old_node->no_reorder;
4901 /* ?? Do not traverse aliases here. */
4902 get_cg_data (&node, false)->clone = new_node;
4903
4904 record_tm_clone_pair (old_decl, new_decl);
4905
4906 if (info->old_node->force_output
4907 || info->old_node->ref_list.first_referring ())
4908 ipa_tm_mark_force_output_node (new_node);
4909 if (info->old_node->forced_by_abi)
4910 ipa_tm_mark_forced_by_abi_node (new_node);
4911 return false;
4912 }
4913
4914 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4915 appropriate for the transactional clone. */
4916
4917 static void
4918 ipa_tm_create_version (struct cgraph_node *old_node)
4919 {
4920 tree new_decl, old_decl, tm_name;
4921 struct cgraph_node *new_node;
4922
4923 old_decl = old_node->decl;
4924 new_decl = copy_node (old_decl);
4925
4926   /* DECL_ASSEMBLER_NAME needs to be set before we call
4927      create_version_clone below, because cgraph_node will
4928      fill the assembler_name_hash.  */
4929 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4930 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4931 SET_DECL_RTL (new_decl, NULL);
4932 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4933
4934 /* Perform the same remapping to the comdat group. */
4935 if (DECL_ONE_ONLY (new_decl))
4936 varpool_node::get (new_decl)->set_comdat_group
4937 (tm_mangle (DECL_COMDAT_GROUP (old_decl)));
4938
4939 gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
4940 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
4941 new_node->local.local = false;
4942 new_node->externally_visible = old_node->externally_visible;
4943 new_node->lowered = true;
4944 new_node->tm_clone = 1;
4945 if (!old_node->implicit_section)
4946 new_node->set_section (old_node->get_section ());
4947 get_cg_data (&old_node, true)->clone = new_node;
4948
4949 if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
4950 {
4951 /* Remap extern inline to static inline. */
4952 /* ??? Is it worth trying to use make_decl_one_only? */
4953 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
4954 {
4955 DECL_EXTERNAL (new_decl) = 0;
4956 TREE_PUBLIC (new_decl) = 0;
4957 DECL_WEAK (new_decl) = 0;
4958 }
4959
4960 tree_function_versioning (old_decl, new_decl,
4961 NULL, false, NULL,
4962 false, NULL, NULL);
4963 }
4964
4965 record_tm_clone_pair (old_decl, new_decl);
4966
4967 symtab->call_cgraph_insertion_hooks (new_node);
4968 if (old_node->force_output
4969 || old_node->ref_list.first_referring ())
4970 ipa_tm_mark_force_output_node (new_node);
4971 if (old_node->forced_by_abi)
4972 ipa_tm_mark_forced_by_abi_node (new_node);
4973
4974 /* Do the same thing, but for any aliases of the original node. */
4975 {
4976 struct create_version_alias_info data;
4977 data.old_node = old_node;
4978 data.new_decl = new_decl;
4979 old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
4980 &data, true);
4981 }
4982 }
4983
4984 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
4985
4986 static void
4987 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
4988 basic_block bb)
4989 {
4990 gimple_stmt_iterator gsi;
4991 gcall *g;
4992
4993 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4994
4995 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
4996 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
4997
4998 split_block_after_labels (bb);
4999 gsi = gsi_after_labels (bb);
5000 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
5001
5002 node->create_edge (cgraph_node::get_create
5003 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
5004 g, 0,
5005 compute_call_stmt_bb_frequency (node->decl,
5006 gimple_bb (g)));
5007 }
5008
5009 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
5010
5011 static bool
5012 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
5013 struct tm_region *region,
5014 gimple_stmt_iterator *gsi, gcall *stmt)
5015 {
5016 tree gettm_fn, ret, old_fn, callfn;
5017 gcall *g;
5018 gassign *g2;
5019 bool safe;
5020
5021 old_fn = gimple_call_fn (stmt);
5022
5023 if (TREE_CODE (old_fn) == ADDR_EXPR)
5024 {
5025 tree fndecl = TREE_OPERAND (old_fn, 0);
5026 tree clone = get_tm_clone_pair (fndecl);
5027
5028 /* By transforming the call into a TM_GETTMCLONE, we are
5029 technically taking the address of the original function and
5030 its clone. Explain this so inlining will know this function
5031 is needed. */
5032 cgraph_node::get (fndecl)->mark_address_taken () ;
5033 if (clone)
5034 cgraph_node::get (clone)->mark_address_taken ();
5035 }
5036
5037 safe = is_tm_safe (TREE_TYPE (old_fn));
5038 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
5039 : BUILT_IN_TM_GETTMCLONE_IRR);
5040 ret = create_tmp_var (ptr_type_node);
5041
5042 if (!safe)
5043 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5044
5045 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5046 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5047 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5048
5049 g = gimple_build_call (gettm_fn, 1, old_fn);
5050 ret = make_ssa_name (ret, g);
5051 gimple_call_set_lhs (g, ret);
5052
5053 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5054
5055 node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
5056 compute_call_stmt_bb_frequency (node->decl,
5057 gimple_bb (g)));
5058
5059 /* Cast return value from tm_gettmclone* into appropriate function
5060 pointer. */
5061 callfn = create_tmp_var (TREE_TYPE (old_fn));
5062 g2 = gimple_build_assign (callfn,
5063 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5064 callfn = make_ssa_name (callfn, g2);
5065 gimple_assign_set_lhs (g2, callfn);
5066 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5067
5068 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5069 which we would have derived from the decl. Failure to save
5070 this bit means we might have to split the basic block. */
5071 if (gimple_call_nothrow_p (stmt))
5072 gimple_call_set_nothrow (stmt, true);
5073
5074 gimple_call_set_fn (stmt, callfn);
5075
5076 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5077 for a call statement. Fix it. */
5078 {
5079 tree lhs = gimple_call_lhs (stmt);
5080 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5081 if (lhs
5082 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5083 {
5084 tree temp;
5085
5086 temp = create_tmp_reg (rettype);
5087 gimple_call_set_lhs (stmt, temp);
5088
5089 g2 = gimple_build_assign (lhs,
5090 fold_build1 (VIEW_CONVERT_EXPR,
5091 TREE_TYPE (lhs), temp));
5092 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5093 }
5094 }
5095
5096 update_stmt (stmt);
5097 cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
5098 if (e && e->indirect_info)
5099 e->indirect_info->polymorphic = false;
5100
5101 return true;
5102 }
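
/* Sketch of the rewrite above (illustrative only).  An indirect call

     x = (*fp) (a);

   inside a transaction becomes, roughly,

     ret    = gettm_fn (fp);     // BUILT_IN_TM_GETTMCLONE_SAFE or ..._IRR
     callfn = (typeof (fp)) ret;
     x      = (*callfn) (a);

   The _IRR variant is used when the type of FP is not transaction_safe, in
   which case the region is also marked GTMA_MAY_ENTER_IRREVOCABLE.  */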
5103
5104 /* Helper function for ipa_tm_transform_calls*. Given a call
5105 statement in GSI which resides inside transaction REGION, redirect
5106 the call to either its wrapper function, or its clone. */
5107
5108 static void
5109 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5110 struct tm_region *region,
5111 gimple_stmt_iterator *gsi,
5112 bool *need_ssa_rename_p)
5113 {
5114 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
5115 struct cgraph_node *new_node;
5116 struct cgraph_edge *e = node->get_edge (stmt);
5117 tree fndecl = gimple_call_fndecl (stmt);
5118
5119 /* For indirect calls, pass the address through the runtime. */
5120 if (fndecl == NULL)
5121 {
5122 *need_ssa_rename_p |=
5123 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5124 return;
5125 }
5126
5127 /* Handle some TM builtins. Ordinarily these aren't actually generated
5128 at this point, but handling these functions when written in by the
5129 user makes it easier to build unit tests. */
5130 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5131 return;
5132
5133 /* Fixup recursive calls inside clones. */
5134 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5135 for recursion but not update the call statements themselves? */
5136 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5137 {
5138 gimple_call_set_fndecl (stmt, current_function_decl);
5139 return;
5140 }
5141
5142 /* If there is a replacement, use it. */
5143 fndecl = find_tm_replacement_function (fndecl);
5144 if (fndecl)
5145 {
5146 new_node = cgraph_node::get_create (fndecl);
5147
5148 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5149
5150 We can't do this earlier in record_tm_replacement because
5151 cgraph_remove_unreachable_nodes is called before we inject
5152 references to the node. Further, we can't do this in some
5153 nice central place in ipa_tm_execute because we don't have
5154 the exact list of wrapper functions that would be used.
5155 Marking more wrappers than necessary results in the creation
5156 of unnecessary cgraph_nodes, which can cause some of the
5157 other IPA passes to crash.
5158
5159 We do need to mark these nodes so that we get the proper
5160 result in expand_call_tm. */
5161 /* ??? This seems broken. How is it that we're marking the
5162 CALLEE as may_enter_irr? Surely we should be marking the
5163 CALLER. Also note that find_tm_replacement_function also
5164 contains mappings into the TM runtime, e.g. memcpy. These
5165 we know won't go irrevocable. */
5166 new_node->local.tm_may_enter_irr = 1;
5167 }
5168 else
5169 {
5170 struct tm_ipa_cg_data *d;
5171 struct cgraph_node *tnode = e->callee;
5172
5173 d = get_cg_data (&tnode, true);
5174 new_node = d->clone;
5175
5176 /* As we've already skipped pure calls and appropriate builtins,
5177 and we've already marked irrevocable blocks, if we can't come
5178 up with a static replacement, then ask the runtime. */
5179 if (new_node == NULL)
5180 {
5181 *need_ssa_rename_p |=
5182 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5183 return;
5184 }
5185
5186 fndecl = new_node->decl;
5187 }
5188
5189 e->redirect_callee (new_node);
5190 gimple_call_set_fndecl (stmt, fndecl);
5191 }
5192
5193 /* Helper function for ipa_tm_transform_calls. For a given BB,
5194 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5195 redirect other calls to the generated transactional clone. */
5196
5197 static bool
5198 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5199 basic_block bb, bitmap irr_blocks)
5200 {
5201 gimple_stmt_iterator gsi;
5202 bool need_ssa_rename = false;
5203
5204 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5205 {
5206 ipa_tm_insert_irr_call (node, region, bb);
5207 return true;
5208 }
5209
5210 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5211 {
5212 gimple stmt = gsi_stmt (gsi);
5213
5214 if (!is_gimple_call (stmt))
5215 continue;
5216 if (is_tm_pure_call (stmt))
5217 continue;
5218
5219 /* Redirect edges to the appropriate replacement or clone. */
5220 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5221 }
5222
5223 return need_ssa_rename;
5224 }
5225
5226 /* Walk the CFG for REGION, beginning at BB. Install calls to
5227 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5228 the generated transactional clone. */
5229
5230 static bool
5231 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5232 basic_block bb, bitmap irr_blocks)
5233 {
5234 bool need_ssa_rename = false;
5235 edge e;
5236 edge_iterator ei;
5237 auto_vec<basic_block> queue;
5238 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5239
5240 queue.safe_push (bb);
5241 do
5242 {
5243 bb = queue.pop ();
5244
5245 need_ssa_rename |=
5246 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5247
5248 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5249 continue;
5250
5251 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5252 continue;
5253
5254 FOR_EACH_EDGE (e, ei, bb->succs)
5255 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5256 {
5257 bitmap_set_bit (visited_blocks, e->dest->index);
5258 queue.safe_push (e->dest);
5259 }
5260 }
5261 while (!queue.is_empty ());
5262
5263 BITMAP_FREE (visited_blocks);
5264
5265 return need_ssa_rename;
5266 }
5267
5268 /* Transform the calls within the TM regions within NODE. */
5269
5270 static void
5271 ipa_tm_transform_transaction (struct cgraph_node *node)
5272 {
5273 struct tm_ipa_cg_data *d;
5274 struct tm_region *region;
5275 bool need_ssa_rename = false;
5276
5277 d = get_cg_data (&node, true);
5278
5279 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5280 calculate_dominance_info (CDI_DOMINATORS);
5281
5282 for (region = d->all_tm_regions; region; region = region->next)
5283 {
5284 /* If we're sure to go irrevocable, don't transform anything. */
5285 if (d->irrevocable_blocks_normal
5286 && bitmap_bit_p (d->irrevocable_blocks_normal,
5287 region->entry_block->index))
5288 {
5289 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5290 | GTMA_MAY_ENTER_IRREVOCABLE
5291 | GTMA_HAS_NO_INSTRUMENTATION);
5292 continue;
5293 }
5294
5295 need_ssa_rename |=
5296 ipa_tm_transform_calls (node, region, region->entry_block,
5297 d->irrevocable_blocks_normal);
5298 }
5299
5300 if (need_ssa_rename)
5301 update_ssa (TODO_update_ssa_only_virtuals);
5302
5303 pop_cfun ();
5304 }
5305
5306 /* Transform the calls within the transactional clone of NODE. */
5307
5308 static void
5309 ipa_tm_transform_clone (struct cgraph_node *node)
5310 {
5311 struct tm_ipa_cg_data *d;
5312 bool need_ssa_rename;
5313
5314 d = get_cg_data (&node, true);
5315
5316 /* If this function makes no calls and has no irrevocable blocks,
5317 then there's nothing to do. */
5318 /* ??? Remove non-aborting top-level transactions. */
5319 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5320 return;
5321
5322 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
5323 calculate_dominance_info (CDI_DOMINATORS);
5324
5325 need_ssa_rename =
5326 ipa_tm_transform_calls (d->clone, NULL,
5327 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
5328 d->irrevocable_blocks_clone);
5329
5330 if (need_ssa_rename)
5331 update_ssa (TODO_update_ssa_only_virtuals);
5332
5333 pop_cfun ();
5334 }
5335
5336 /* Main entry point for the transactional memory IPA pass. */
5337
5338 static unsigned int
5339 ipa_tm_execute (void)
5340 {
5341 cgraph_node_queue tm_callees = cgraph_node_queue ();
5342 /* List of functions that will go irrevocable. */
5343 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5344
5345 struct cgraph_node *node;
5346 struct tm_ipa_cg_data *d;
5347 enum availability a;
5348 unsigned int i;
5349
5350 #ifdef ENABLE_CHECKING
5351 cgraph_node::verify_cgraph_nodes ();
5352 #endif
5353
5354 bitmap_obstack_initialize (&tm_obstack);
5355 initialize_original_copy_tables ();
5356
5357 /* For all local functions marked tm_callable, queue them. */
5358 FOR_EACH_DEFINED_FUNCTION (node)
5359 if (is_tm_callable (node->decl)
5360 && node->get_availability () >= AVAIL_INTERPOSABLE)
5361 {
5362 d = get_cg_data (&node, true);
5363 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5364 }
5365
5366 /* For all local reachable functions... */
5367 FOR_EACH_DEFINED_FUNCTION (node)
5368 if (node->lowered
5369 && node->get_availability () >= AVAIL_INTERPOSABLE)
5370 {
5371 /* ... marked tm_pure, record that fact for the runtime by
5372 indicating that the pure function is its own tm_callable.
5373 No need to do this if the function's address can't be taken. */
5374 if (is_tm_pure (node->decl))
5375 {
5376 if (!node->local.local)
5377 record_tm_clone_pair (node->decl, node->decl);
5378 continue;
5379 }
5380
5381 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5382 calculate_dominance_info (CDI_DOMINATORS);
5383
5384 tm_region_init (NULL);
5385 if (all_tm_regions)
5386 {
5387 d = get_cg_data (&node, true);
5388
5389 /* Scan for calls that are in each transaction, and
5390 generate the uninstrumented code path. */
5391 ipa_tm_scan_calls_transaction (d, &tm_callees);
5392
5393 /* Put it in the worklist so we can scan the function
5394 later (ipa_tm_scan_irr_function) and mark the
5395 irrevocable blocks. */
5396 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5397 d->want_irr_scan_normal = true;
5398 }
5399
5400 pop_cfun ();
5401 }
5402
5403 /* For every local function on the callee list, scan as if we will be
5404 creating a transactional clone, queueing all new functions we find
5405 along the way. */
5406 for (i = 0; i < tm_callees.length (); ++i)
5407 {
5408 node = tm_callees[i];
5409 a = node->get_availability ();
5410 d = get_cg_data (&node, true);
5411
5412 /* Put it in the worklist so we can scan the function later
5413 (ipa_tm_scan_irr_function) and mark the irrevocable
5414 blocks. */
5415 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5416
5417 /* Some callees cannot be arbitrarily cloned. These will always be
5418 irrevocable. Mark these now, so that we need not scan them. */
5419 if (is_tm_irrevocable (node->decl))
5420 ipa_tm_note_irrevocable (node, &irr_worklist);
5421 else if (a <= AVAIL_NOT_AVAILABLE
5422 && !is_tm_safe_or_pure (node->decl))
5423 ipa_tm_note_irrevocable (node, &irr_worklist);
5424 else if (a >= AVAIL_INTERPOSABLE)
5425 {
5426 if (!tree_versionable_function_p (node->decl))
5427 ipa_tm_note_irrevocable (node, &irr_worklist);
5428 else if (!d->is_irrevocable)
5429 {
5430 /* If this is an alias, make sure its base is queued as well.
5431		     We need not scan the callees now, as the base will do.  */
5432 if (node->alias)
5433 {
5434 node = cgraph_node::get (node->thunk.alias);
5435 d = get_cg_data (&node, true);
5436 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5437 continue;
5438 }
5439
5440 /* Add all nodes called by this function into
5441 tm_callees as well. */
5442 ipa_tm_scan_calls_clone (node, &tm_callees);
5443 }
5444 }
5445 }
5446
5447 /* Iterate scans until no more work to be done. Prefer not to use
5448 vec::pop because the worklist tends to follow a breadth-first
5449    search of the callgraph, which should allow convergence with a
5450 minimum number of scans. But we also don't want the worklist
5451 array to grow without bound, so we shift the array up periodically. */
5452 for (i = 0; i < irr_worklist.length (); ++i)
5453 {
5454 if (i > 256 && i == irr_worklist.length () / 8)
5455 {
5456 irr_worklist.block_remove (0, i);
5457 i = 0;
5458 }
5459
5460 node = irr_worklist[i];
5461 d = get_cg_data (&node, true);
5462 d->in_worklist = false;
5463
5464 if (d->want_irr_scan_normal)
5465 {
5466 d->want_irr_scan_normal = false;
5467 ipa_tm_scan_irr_function (node, false);
5468 }
5469 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5470 ipa_tm_note_irrevocable (node, &irr_worklist);
5471 }
5472
5473 /* For every function on the callee list, collect the tm_may_enter_irr
5474 bit on the node. */
5475 irr_worklist.truncate (0);
5476 for (i = 0; i < tm_callees.length (); ++i)
5477 {
5478 node = tm_callees[i];
5479 if (ipa_tm_mayenterirr_function (node))
5480 {
5481 d = get_cg_data (&node, true);
5482 gcc_assert (d->in_worklist == false);
5483 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5484 }
5485 }
5486
5487 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5488 for (i = 0; i < irr_worklist.length (); ++i)
5489 {
5490 struct cgraph_node *caller;
5491 struct cgraph_edge *e;
5492 struct ipa_ref *ref;
5493
5494 if (i > 256 && i == irr_worklist.length () / 8)
5495 {
5496 irr_worklist.block_remove (0, i);
5497 i = 0;
5498 }
5499
5500 node = irr_worklist[i];
5501 d = get_cg_data (&node, true);
5502 d->in_worklist = false;
5503 node->local.tm_may_enter_irr = true;
5504
5505 /* Propagate back to normal callers. */
5506 for (e = node->callers; e ; e = e->next_caller)
5507 {
5508 caller = e->caller;
5509 if (!is_tm_safe_or_pure (caller->decl)
5510 && !caller->local.tm_may_enter_irr)
5511 {
5512 d = get_cg_data (&caller, true);
5513 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5514 }
5515 }
5516
5517 /* Propagate back to referring aliases as well. */
5518 FOR_EACH_ALIAS (node, ref)
5519 {
5520 caller = dyn_cast<cgraph_node *> (ref->referring);
5521 if (!caller->local.tm_may_enter_irr)
5522 {
5523 /* ?? Do not traverse aliases here. */
5524 d = get_cg_data (&caller, false);
5525 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5526 }
5527 }
5528 }
5529
5530 /* Now validate all tm_safe functions, and all atomic regions in
5531 other functions. */
5532 FOR_EACH_DEFINED_FUNCTION (node)
5533 if (node->lowered
5534 && node->get_availability () >= AVAIL_INTERPOSABLE)
5535 {
5536 d = get_cg_data (&node, true);
5537 if (is_tm_safe (node->decl))
5538 ipa_tm_diagnose_tm_safe (node);
5539 else if (d->all_tm_regions)
5540 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5541 }
5542
5543 /* Create clones. Do those that are not irrevocable and have a
5544 positive call count. Do those publicly visible functions that
5545 the user directed us to clone. */
5546 for (i = 0; i < tm_callees.length (); ++i)
5547 {
5548 bool doit = false;
5549
5550 node = tm_callees[i];
5551 if (node->cpp_implicit_alias)
5552 continue;
5553
5554 a = node->get_availability ();
5555 d = get_cg_data (&node, true);
5556
5557 if (a <= AVAIL_NOT_AVAILABLE)
5558 doit = is_tm_callable (node->decl);
5559 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
5560 doit = true;
5561 else if (!d->is_irrevocable
5562 && d->tm_callers_normal + d->tm_callers_clone > 0)
5563 doit = true;
5564
5565 if (doit)
5566 ipa_tm_create_version (node);
5567 }
5568
5569 /* Redirect calls to the new clones, and insert irrevocable marks. */
5570 for (i = 0; i < tm_callees.length (); ++i)
5571 {
5572 node = tm_callees[i];
5573 if (node->analyzed)
5574 {
5575 d = get_cg_data (&node, true);
5576 if (d->clone)
5577 ipa_tm_transform_clone (node);
5578 }
5579 }
5580 FOR_EACH_DEFINED_FUNCTION (node)
5581 if (node->lowered
5582 && node->get_availability () >= AVAIL_INTERPOSABLE)
5583 {
5584 d = get_cg_data (&node, true);
5585 if (d->all_tm_regions)
5586 ipa_tm_transform_transaction (node);
5587 }
5588
5589 /* Free and clear all data structures. */
5590 tm_callees.release ();
5591 irr_worklist.release ();
5592 bitmap_obstack_release (&tm_obstack);
5593 free_original_copy_tables ();
5594
5595 FOR_EACH_FUNCTION (node)
5596 node->aux = NULL;
5597
5598 #ifdef ENABLE_CHECKING
5599 cgraph_node::verify_cgraph_nodes ();
5600 #endif
5601
5602 return 0;
5603 }
5604
5605 namespace {
5606
5607 const pass_data pass_data_ipa_tm =
5608 {
5609 SIMPLE_IPA_PASS, /* type */
5610 "tmipa", /* name */
5611 OPTGROUP_NONE, /* optinfo_flags */
5612 TV_TRANS_MEM, /* tv_id */
5613 ( PROP_ssa | PROP_cfg ), /* properties_required */
5614 0, /* properties_provided */
5615 0, /* properties_destroyed */
5616 0, /* todo_flags_start */
5617 0, /* todo_flags_finish */
5618 };
5619
5620 class pass_ipa_tm : public simple_ipa_opt_pass
5621 {
5622 public:
5623 pass_ipa_tm (gcc::context *ctxt)
5624 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
5625 {}
5626
5627 /* opt_pass methods: */
5628 virtual bool gate (function *) { return flag_tm; }
5629 virtual unsigned int execute (function *) { return ipa_tm_execute (); }
5630
5631 }; // class pass_ipa_tm
5632
5633 } // anon namespace
5634
5635 simple_ipa_opt_pass *
5636 make_pass_ipa_tm (gcc::context *ctxt)
5637 {
5638 return new pass_ipa_tm (ctxt);
5639 }
5640
5641 #include "gt-trans-mem.h"