/* Utilities for ipa analysis.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "predict.h"
28 #include "alloc-pool.h"
29 #include "cgraph.h"
30 #include "lto-streamer.h"
31 #include "dumpfile.h"
32 #include "splay-tree.h"
33 #include "ipa-utils.h"
34 #include "symbol-summary.h"
35 #include "tree-vrp.h"
36 #include "ipa-prop.h"
37 #include "ipa-fnsummary.h"
38
/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
                 const char * note,
                 struct cgraph_node** order,
                 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}


struct searchc_env {
  struct cgraph_node **stack;	/* DFS stack of nodes being visited.  */
  struct cgraph_node **result;	/* Output array for the postorder.  */
  int stack_size;		/* Current depth of STACK.  */
  int order_pos;		/* Number of entries written to RESULT.  */
  splay_tree nodes_marked_new;	/* Unvisited ("new") nodes, keyed by UID.  */
  bool reduce;			/* Reduce each SCC to a single entry.  */
  int count;			/* Next DFS number to assign.  */
};

/* This is an implementation of Tarjan's strongly connected region
   finder as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  The env parameter is because
   it is recursive and there are no nested functions here.  This
   function should only be called from itself or
   ipa_reduced_postorder.  ENV is a stack env and would be
   unnecessary if C had nested functions.  V is the node to start
   searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
         bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark the node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->get_uid ());

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info *w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
          && (avail >= AVAIL_INTERPOSABLE))
        {
          w_info = (struct ipa_dfs_info *) w->aux;
          if (w_info->new_node)
            {
              searchc (env, w, ignore_edge);
              v_info->low_link =
                (v_info->low_link < w_info->low_link) ?
                v_info->low_link : w_info->low_link;
            }
          else if ((w_info->dfn_number < v_info->dfn_number)
                   && (w_info->on_stack))
            v_info->low_link =
              (w_info->dfn_number < v_info->low_link) ?
              w_info->dfn_number : v_info->low_link;
        }
    }

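  /* V is the root of a strongly connected component exactly when its low
     link equals its own DFS number; in that case pop the whole component
     off the stack.  */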
  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do
        {
          x = env->stack[--(env->stack_size)];
          x_info = (struct ipa_dfs_info *) x->aux;
          x_info->on_stack = false;
          x_info->scc_no = v_info->dfn_number;

          if (env->reduce)
            {
              x_info->next_cycle = last;
              last = x;
            }
          else
            env->result[env->order_pos++] = x;
        }
      while (v != x);
      if (env->reduce)
        env->result[env->order_pos++] = v;
    }
}

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Nodes of AVAIL_INTERPOSABLE availability or better are included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
                       bool reduce,
                       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail >= AVAIL_INTERPOSABLE)
        {
          /* Reuse the info if it is already there.  */
          struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
          if (!info)
            info = XCNEW (struct ipa_dfs_info);
          info->new_node = true;
          info->on_stack = false;
          info->next_cycle = NULL;
          node->aux = info;

          splay_tree_insert (env.nodes_marked_new,
                             (splay_tree_key)node->get_uid (),
                             (splay_tree_value)node);
        }
      else
        node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
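
/* A minimal usage sketch, assuming a caller-provided per-node hook
   'process'; this mirrors how IPA passes typically drive the sort:

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reduced_postorder (order, true, NULL);
     for (int i = 0; i < n; i++)
       process (order[i]);
     ipa_free_postorder_info ();
     free (order);  */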

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of
   call graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
        {
          free (node->aux);
          node->aux = NULL;
        }
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
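
/* A short sketch of walking one reduced cycle, assuming 'visit' is a
   caller-provided hook and a reduced postorder has been computed:

     vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (node);
     unsigned i;
     cgraph_node *member;
     FOR_EACH_VEC_ELT (cycle, i, member)
       visit (member);
     cycle.release ();  */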

/* Return true iff the CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
          && callee_dfs
          && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
          && (pass
              || (!node->address_taken
                  && !node->inlined_to
                  && !node->alias && !node->thunk.thunk_p
                  && !node->only_called_directly_p ())))
        {
          stack_size = 0;
          stack[stack_size].node = node;
          stack[stack_size].edge = node->callers;
          stack[stack_size].ref = 0;
          node->aux = (void *)(size_t)1;
          while (stack_size >= 0)
            {
              while (true)
                {
                  node2 = NULL;
                  while (stack[stack_size].edge && !node2)
                    {
                      edge = stack[stack_size].edge;
                      node2 = edge->caller;
                      stack[stack_size].edge = edge->next_caller;
                      /* Break possible cycles involving always-inline
                         functions by ignoring edges from always-inline
                         functions to non-always-inline functions.  */
                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
                          && !DECL_DISREGARD_INLINE_LIMITS
                                (edge->callee->function_symbol ()->decl))
                        node2 = NULL;
                    }
                  for (; stack[stack_size].node->iterate_referring (
                             stack[stack_size].ref,
                             ref) && !node2;
                       stack[stack_size].ref++)
                    {
                      if (ref->use == IPA_REF_ALIAS)
                        node2 = dyn_cast <cgraph_node *> (ref->referring);
                    }
                  if (!node2)
                    break;
                  if (!node2->aux)
                    {
                      stack[++stack_size].node = node2;
                      stack[stack_size].edge = node2->callers;
                      stack[stack_size].ref = 0;
                      node2->aux = (void *)(size_t)1;
                    }
                }
              order[order_pos++] = stack[stack_size--].node;
            }
        }
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}
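
/* A minimal usage sketch, analogous to the one for ipa_reduced_postorder;
   'emit' stands for whatever per-node processing the caller performs:

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reverse_postorder (order);
     for (int i = 0; i < n; i++)
       emit (order[i]);
     free (order);  */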


/* Given a memory reference T, return the variable at the bottom of the
   access.  Unlike get_base_address, this recurses through INDIRECT_REFs.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
         && (!CONSTANT_CLASS_P (t))
         && TREE_CODE (t) != LABEL_DECL
         && TREE_CODE (t) != FUNCTION_DECL
         && TREE_CODE (t) != CONST_DECL
         && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
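
/* For instance, for a reference like a.b[i] the loop strips the ARRAY_REF
   and COMPONENT_REF and returns the VAR_DECL of a, while for *p_1 it strips
   the MEM_REF and returns the SSA name p_1.  */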

/* Scale the profile of all call edges in NODE by the ratio
   NODE->count / ORIG_COUNT.  */

void
scale_ipa_profile_for_fn (struct cgraph_node *node, profile_count orig_count)
{
  profile_count to = node->count;
  profile_count::adjust_for_ipa_scaling (&to, &orig_count);
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
}
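
/* For example, with ORIG_COUNT == 100 and NODE->count == 50 each edge count
   is multiplied by 50/100, i.e. halved (assuming ordinary initialized IPA
   counts; adjust_for_ipa_scaling is there to make the degenerate cases,
   such as zero or uninitialized counts, behave).  */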

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
                    struct cgraph_node *src,
                    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;
  bool copy_counts = false;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging a zero profile into DST is a no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME: when we merge in an unknown profile, we ought to set the counts
     as unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
               src->dump_name (), dst->dump_name ());
    }
  profile_count orig_count = dst->count;

  /* Either sum the profiles if both are IPA and not global0, or pick the
     more informative one (that is, the nonzero IPA one if the other is
     uninitialized, guessed or global0).  */

  if ((dst->count.ipa ().nonzero_p ()
       || src->count.ipa ().nonzero_p ())
      && dst->count.ipa ().initialized_p ()
      && src->count.ipa ().initialized_p ())
    dst->count = dst->count.ipa () + src->count.ipa ();
  else if (dst->count.ipa ().initialized_p ())
    ;
  else if (src->count.ipa ().initialized_p ())
    {
      copy_counts = true;
      dst->count = src->count.ipa ();
    }

  /* If no updating is needed, return early.  */
  if (dst->count == orig_count)
    return;

  /* First handle functions with no gimple body.  */
  if (dst->thunk.thunk_p || dst->alias
      || src->thunk.thunk_p || src->alias)
    {
      scale_ipa_profile_for_fn (dst, orig_count);
      return;
    }

  /* This is ugly.  We need to get both function bodies into memory.
     If the declaration is merged, we need to duplicate it to be able
     to load the body that is being replaced.  This makes the symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl; we want to remove its file decl data
         and link it with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
        = src->lto_file_data->function_decl_states->find_slot (&temp,
                                                               NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into the body of
         DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with the new declaration, so the LTO
         streamer can look it up.  */
      state->fn_decl = src->decl;
      slot
        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
           != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
        fprintf (symtab->dump_file,
                 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
          if (dstbb == NULL)
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "No matching block for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
            {
              if (symtab->dump_file)
                fprintf (symtab->dump_file,
                         "Edge count mismatch for bb %i.\n",
                         srcbb->index);
              match = false;
              break;
            }
          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
            {
              edge srce = EDGE_SUCC (srcbb, i);
              edge dste = EDGE_SUCC (dstbb, i);
              if (srce->dest->index != dste->dest->index)
                {
                  if (symtab->dump_file)
                    fprintf (symtab->dump_file,
                             "Succ edge mismatch for bb %i.\n",
                             srce->dest->index);
                  match = false;
                  break;
                }
            }
        }
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* The function-local and global profiles may be out of sync.  First
         scale them the same way fixup_cfg would.  */
      profile_count srcnum = src->count;
      profile_count srcden = ENTRY_BLOCK_PTR_FOR_FN (srccfun)->count;
      bool srcscale = srcnum.initialized_p () && !(srcnum == srcden);
      profile_count dstnum = orig_count;
      profile_count dstden = ENTRY_BLOCK_PTR_FOR_FN (dstcfun)->count;
      bool dstscale = !copy_counts
                      && dstnum.initialized_p () && !(dstnum == dstden);

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
        {
          unsigned int i;

          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

          profile_count srccount = srcbb->count;
          if (srcscale)
            srccount = srccount.apply_scale (srcnum, srcden);
          if (dstscale)
            dstbb->count = dstbb->count.apply_scale (dstnum, dstden);

          if (copy_counts)
            {
              dstbb->count = srccount;
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
                  if (srce->probability.initialized_p ())
                    dste->probability = srce->probability;
                }
            }
          else
            {
              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
                {
                  edge srce = EDGE_SUCC (srcbb, i);
                  edge dste = EDGE_SUCC (dstbb, i);
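                  /* Combine the two branch probabilities as an average
                     weighted by each block's share of the combined IPA
                     count.  */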
                  dste->probability =
                    dste->probability * dstbb->count.ipa ().probability_in
                                          (dstbb->count.ipa ()
                                           + srccount.ipa ())
                    + srce->probability * srcbb->count.ipa ().probability_in
                                            (dstbb->count.ipa ()
                                             + srccount.ipa ());
                }
              dstbb->count = dstbb->count.ipa () + srccount.ipa ();
            }
        }
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
        {
          if (e->speculative)
            continue;
          e->count = gimple_bb (e->call_stmt)->count;
        }
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
        {
          profile_count count = gimple_bb (e->call_stmt)->count;
          /* When the call is speculative, we need to re-distribute the
             probabilities the same way as they were.  This is not really
             correct because in the other copy the speculation may differ;
             but it is probably not worth the effort.  */
          if (e->speculative)
            {
              cgraph_edge *direct, *indirect;
              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
              ipa_ref *ref;

              e->speculative_call_info (direct, indirect, ref);
              gcc_assert (e == indirect);
              if (e2 && e2->speculative)
                e2->speculative_call_info (direct2, indirect2, ref);
              if (indirect->count > profile_count::zero ()
                  || direct->count > profile_count::zero ())
                {
                  /* We should have mismatched earlier if there is no
                     matching indirect edge.  */
                  if (!e2)
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Mismatch in merging indirect edges\n");
                    }
                  else if (!e2->speculative)
                    indirect->count += e2->count;
                  else if (e2->speculative)
                    {
                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
                        {
                          if (direct2->count >= direct->count)
                            {
                              direct->redirect_callee (direct2->callee);
                              indirect->count += indirect2->count
                                                 + direct->count;
                              direct->count = direct2->count;
                            }
                          else
                            indirect->count += indirect2->count
                                               + direct2->count;
                        }
                      else
                        {
                          direct->count += direct2->count;
                          indirect->count += indirect2->count;
                        }
                    }
                }
              else
                /* At the moment we should only have profile-feedback based
                   speculations when merging.  */
                gcc_unreachable ();
            }
          else if (e2 && e2->speculative)
            {
              cgraph_edge *direct, *indirect;
              ipa_ref *ref;

              e2->speculative_call_info (direct, indirect, ref);
              e->count = count;
              e->make_speculative (direct->callee, direct->count);
            }
          else
            e->count = count;
        }
      if (!preserve_body)
        src->release_body ();
      /* Update the summary.  */
      compute_fn_summary (dst, 0);
    }
  /* We can't update the CFG profile, but we can scale the IPA profile.
     The CFG will be scaled according to dst->count after IPA passes.  */
  else
    scale_ipa_profile_for_fn (dst, orig_count);
  src->decl = oldsrcdecl;
}

/* Return true if a call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function, or we know all of them
     are semantically equivalent, we can still consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}
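
/* One plausible call-site shape (a sketch; STMT is assumed to be a GIMPLE
   statement for a direct call in the current function):

     if (recursive_call_p (current_function_decl,
                           gimple_call_fndecl (stmt)))
       ...  */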