07e6a5a2887f6014b01aae0a2f69bfe92f371f3e
[gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
36
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
42
43
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
50 #endif
51
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
54 #endif
55
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
58 #endif
59 \f
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column; a null element means that column is unsaved.  */
  cfi_vec reg_save;
};
72
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  Used to record
   register-to-register saves (DW_CFA_register) so they can be
   inverted when looking up where a register currently lives.  */
struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
};
78
79
80 /* Since we no longer have a proper CFG, we're going to create a facsimile
81 of one on the fly while processing the frame-related insns.
82
83 We create dw_trace_info structures for each extended basic block beginning
84 and ending at a "save point". Save points are labels, barriers, certain
85 notes, and of course the beginning and end of the function.
86
87 As we encounter control transfer insns, we propagate the "current"
88 row state across the edges to the starts of traces. When checking is
89 enabled, we validate that we propagate the same data from all sources.
90
91 All traces are members of the TRACE_INFO array, in the order in which
92 they appear in the instruction stream.
93
94 All save points are present in the TRACE_INDEX hash, mapping the insn
95 starting a trace to the dw_trace_info describing the trace. */
96
struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  poly_int64_pod beg_true_args_size, end_true_args_size;
  poly_int64_pod beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
151
152
153 /* Hashtable helpers. */
154
/* Hash traces by their head insn; no ownership is taken (nofree).  */
struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};
160
/* The head insn identifies the trace, so its UID serves as the hash.  */
inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}
166
/* Two trace entries are the same iff they start at the same insn.  */
inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
172
173
/* The variables making up the pseudo-cfg, as described above.  */

/* One dw_trace_info per trace, in instruction-stream order.  */
static vec<dw_trace_info> trace_info;
/* Worklist of traces.  NOTE(review): the producer/consumer code is not
   visible in this chunk; presumably traces pending (re)scan.  */
static vec<dw_trace_info *> trace_work_list;
/* Maps the insn starting a trace to its dw_trace_info (see
   trace_info_hasher and get_trace_info).  */
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* If the CIE saves the return column in a register, that save entry;
   NOTE(review): consumers are outside this chunk.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Monotonic counter used by dwarf2out_cfi_label to make LCFI labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;
205
/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

/* One queued save: REG is saved in SAVED_REG, or — when SAVED_REG is
   null — at CFA_OFFSET from the CFA (see dwarf2out_flush_queued_reg_saves).  */
struct queued_reg_save {
  rtx reg;
  rtx saved_reg;
  poly_int64_pod cfa_offset;
};


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
225 \f
226 /* Hook used by __throw. */
227
228 rtx
229 expand_builtin_dwarf_sp_column (void)
230 {
231 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
232 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
233 }
234
235 /* MEM is a memory reference for the register size table, each element of
236 which has mode MODE. Initialize column C as a return address column. */
237
238 static void
239 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
240 {
241 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
242 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
243 emit_move_insn (adjust_address (mem, mode, offset),
244 gen_int_mode (size, mode));
245 }
246
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  Prevents double work when a
     register appears both standalone and inside a span.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
261
262 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
263 initialize the dwarf register size table entry corresponding to register
264 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
265 use for the size entry to initialize, and INIT_STATE is the communication
266 datastructure conveying what we're doing to our caller. */
267
268 static
269 void init_one_dwarf_reg_size (int regno, machine_mode regmode,
270 rtx table, machine_mode slotmode,
271 init_one_dwarf_reg_state *init_state)
272 {
273 const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
274 const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
275 const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
276
277 poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
278 poly_int64 regsize = GET_MODE_SIZE (regmode);
279
280 init_state->processed_regno[regno] = true;
281
282 if (rnum >= DWARF_FRAME_REGISTERS)
283 return;
284
285 if (dnum == DWARF_FRAME_RETURN_COLUMN)
286 {
287 if (regmode == VOIDmode)
288 return;
289 init_state->wrote_return_column = true;
290 }
291
292 /* ??? When is this true? Should it be a test based on DCOL instead? */
293 if (maybe_lt (slotoffset, 0))
294 return;
295
296 emit_move_insn (adjust_address (table, slotmode, slotoffset),
297 gen_int_mode (regsize, slotmode));
298 }
299
300 /* Generate code to initialize the dwarf register size table located
301 at the provided ADDRESS. */
302
303 void
304 expand_builtin_init_dwarf_reg_sizes (tree address)
305 {
306 unsigned int i;
307 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
308 rtx addr = expand_normal (address);
309 rtx mem = gen_rtx_MEM (BLKmode, addr);
310
311 init_one_dwarf_reg_state init_state;
312
313 memset ((char *)&init_state, 0, sizeof (init_state));
314
315 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
316 {
317 machine_mode save_mode;
318 rtx span;
319
320 /* No point in processing a register multiple times. This could happen
321 with register spans, e.g. when a reg is first processed as a piece of
322 a span, then as a register on its own later on. */
323
324 if (init_state.processed_regno[i])
325 continue;
326
327 save_mode = targetm.dwarf_frame_reg_mode (i);
328 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
329
330 if (!span)
331 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
332 else
333 {
334 for (int si = 0; si < XVECLEN (span, 0); si++)
335 {
336 rtx reg = XVECEXP (span, 0, si);
337
338 init_one_dwarf_reg_size
339 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
340 }
341 }
342 }
343
344 if (!init_state.wrote_return_column)
345 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
346
347 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
348 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
349 #endif
350
351 targetm.init_dwarf_reg_sizes_extra (address);
352 }
353
354 \f
355 static dw_trace_info *
356 get_trace_info (rtx_insn *insn)
357 {
358 dw_trace_info dummy;
359 dummy.head = insn;
360 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
361 }
362
363 static bool
364 save_point_p (rtx_insn *insn)
365 {
366 /* Labels, except those that are really jump tables. */
367 if (LABEL_P (insn))
368 return inside_basic_block_p (insn);
369
370 /* We split traces at the prologue/epilogue notes because those
371 are points at which the unwind info is usually stable. This
372 makes it easier to find spots with identical unwind info so
373 that we can use remember/restore_state opcodes. */
374 if (NOTE_P (insn))
375 switch (NOTE_KIND (insn))
376 {
377 case NOTE_INSN_PROLOGUE_END:
378 case NOTE_INSN_EPILOGUE_BEG:
379 return true;
380 }
381
382 return false;
383 }
384
385 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
386
387 static inline HOST_WIDE_INT
388 div_data_align (HOST_WIDE_INT off)
389 {
390 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
391 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
392 return r;
393 }
394
395 /* Return true if we need a signed version of a given opcode
396 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
397
398 static inline bool
399 need_data_align_sf_opcode (HOST_WIDE_INT off)
400 {
401 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
402 }
403
404 /* Return a pointer to a newly allocated Call Frame Instruction. */
405
406 static inline dw_cfi_ref
407 new_cfi (void)
408 {
409 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
410
411 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
412 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
413
414 return cfi;
415 }
416
417 /* Return a newly allocated CFI row, with no defined data. */
418
419 static dw_cfi_row *
420 new_cfi_row (void)
421 {
422 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
423
424 row->cfa.reg = INVALID_REGNUM;
425
426 return row;
427 }
428
429 /* Return a copy of an existing CFI row. */
430
431 static dw_cfi_row *
432 copy_cfi_row (dw_cfi_row *src)
433 {
434 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
435
436 *dst = *src;
437 dst->reg_save = vec_safe_copy (src->reg_save);
438
439 return dst;
440 }
441
442 /* Return a copy of an existing CFA location. */
443
444 static dw_cfa_location *
445 copy_cfa (dw_cfa_location *src)
446 {
447 dw_cfa_location *dst = ggc_alloc<dw_cfa_location> ();
448 *dst = *src;
449 return dst;
450 }
451
452 /* Generate a new label for the CFI info to refer to. */
453
454 static char *
455 dwarf2out_cfi_label (void)
456 {
457 int num = dwarf2out_cfi_label_num++;
458 char label[20];
459
460 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
461
462 return xstrdup (label);
463 }
464
465 /* Add CFI either to the current insn stream or to a vector, or both. */
466
467 static void
468 add_cfi (dw_cfi_ref cfi)
469 {
470 any_cfis_emitted = true;
471
472 if (add_cfi_insn != NULL)
473 {
474 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
475 NOTE_CFI (add_cfi_insn) = cfi;
476 }
477
478 if (add_cfi_vec != NULL)
479 vec_safe_push (*add_cfi_vec, cfi);
480 }
481
482 static void
483 add_cfi_args_size (poly_int64 size)
484 {
485 /* We don't yet have a representation for polynomial sizes. */
486 HOST_WIDE_INT const_size = size.to_constant ();
487
488 dw_cfi_ref cfi = new_cfi ();
489
490 /* While we can occasionally have args_size < 0 internally, this state
491 should not persist at a point we actually need an opcode. */
492 gcc_assert (const_size >= 0);
493
494 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
495 cfi->dw_cfi_oprnd1.dw_cfi_offset = const_size;
496
497 add_cfi (cfi);
498 }
499
500 static void
501 add_cfi_restore (unsigned reg)
502 {
503 dw_cfi_ref cfi = new_cfi ();
504
505 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
506 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
507
508 add_cfi (cfi);
509 }
510
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand; new slots are cleared, i.e. "not saved".  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
521
522 /* This function fills in aa dw_cfa_location structure from a dwarf location
523 descriptor sequence. */
524
525 static void
526 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
527 {
528 struct dw_loc_descr_node *ptr;
529 cfa->offset = 0;
530 cfa->base_offset = 0;
531 cfa->indirect = 0;
532 cfa->reg = -1;
533
534 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
535 {
536 enum dwarf_location_atom op = ptr->dw_loc_opc;
537
538 switch (op)
539 {
540 case DW_OP_reg0:
541 case DW_OP_reg1:
542 case DW_OP_reg2:
543 case DW_OP_reg3:
544 case DW_OP_reg4:
545 case DW_OP_reg5:
546 case DW_OP_reg6:
547 case DW_OP_reg7:
548 case DW_OP_reg8:
549 case DW_OP_reg9:
550 case DW_OP_reg10:
551 case DW_OP_reg11:
552 case DW_OP_reg12:
553 case DW_OP_reg13:
554 case DW_OP_reg14:
555 case DW_OP_reg15:
556 case DW_OP_reg16:
557 case DW_OP_reg17:
558 case DW_OP_reg18:
559 case DW_OP_reg19:
560 case DW_OP_reg20:
561 case DW_OP_reg21:
562 case DW_OP_reg22:
563 case DW_OP_reg23:
564 case DW_OP_reg24:
565 case DW_OP_reg25:
566 case DW_OP_reg26:
567 case DW_OP_reg27:
568 case DW_OP_reg28:
569 case DW_OP_reg29:
570 case DW_OP_reg30:
571 case DW_OP_reg31:
572 cfa->reg = op - DW_OP_reg0;
573 break;
574 case DW_OP_regx:
575 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
576 break;
577 case DW_OP_breg0:
578 case DW_OP_breg1:
579 case DW_OP_breg2:
580 case DW_OP_breg3:
581 case DW_OP_breg4:
582 case DW_OP_breg5:
583 case DW_OP_breg6:
584 case DW_OP_breg7:
585 case DW_OP_breg8:
586 case DW_OP_breg9:
587 case DW_OP_breg10:
588 case DW_OP_breg11:
589 case DW_OP_breg12:
590 case DW_OP_breg13:
591 case DW_OP_breg14:
592 case DW_OP_breg15:
593 case DW_OP_breg16:
594 case DW_OP_breg17:
595 case DW_OP_breg18:
596 case DW_OP_breg19:
597 case DW_OP_breg20:
598 case DW_OP_breg21:
599 case DW_OP_breg22:
600 case DW_OP_breg23:
601 case DW_OP_breg24:
602 case DW_OP_breg25:
603 case DW_OP_breg26:
604 case DW_OP_breg27:
605 case DW_OP_breg28:
606 case DW_OP_breg29:
607 case DW_OP_breg30:
608 case DW_OP_breg31:
609 cfa->reg = op - DW_OP_breg0;
610 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
611 break;
612 case DW_OP_bregx:
613 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
614 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
615 break;
616 case DW_OP_deref:
617 cfa->indirect = 1;
618 break;
619 case DW_OP_plus_uconst:
620 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
621 break;
622 default:
623 gcc_unreachable ();
624 }
625 }
626 }
627
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  Opcodes that do not
   affect the CFA are ignored.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    /* Offset-only change: the CFA register is unchanged.  */
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    /* Register-only change: the offset is unchanged.  */
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    /* Full register + offset definition.  */
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Prefer the cached location (stored by def_cfa_0 for polynomial
	 offsets); otherwise re-parse the location expression.  */
      if (cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc)
	*loc = *cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc;
      else
	get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    /* Only one level of remembered state is supported; the asserts
       enforce balanced remember/restore pairs.  */
    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
671
672 /* Determine if two dw_cfa_location structures define the same data. */
673
674 bool
675 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
676 {
677 return (loc1->reg == loc2->reg
678 && known_eq (loc1->offset, loc2->offset)
679 && loc1->indirect == loc2->indirect
680 && (loc1->indirect == 0
681 || known_eq (loc1->base_offset, loc2->base_offset)));
682 }
683
684 /* Determine if two CFI operands are identical. */
685
686 static bool
687 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
688 {
689 switch (t)
690 {
691 case dw_cfi_oprnd_unused:
692 return true;
693 case dw_cfi_oprnd_reg_num:
694 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
695 case dw_cfi_oprnd_offset:
696 return a->dw_cfi_offset == b->dw_cfi_offset;
697 case dw_cfi_oprnd_addr:
698 return (a->dw_cfi_addr == b->dw_cfi_addr
699 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
700 case dw_cfi_oprnd_loc:
701 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
702 case dw_cfi_oprnd_cfa_loc:
703 return cfa_equal_p (a->dw_cfi_cfa_loc, b->dw_cfi_cfa_loc);
704 }
705 gcc_unreachable ();
706 }
707
708 /* Determine if two CFI entries are identical. */
709
710 static bool
711 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
712 {
713 enum dwarf_call_frame_info opc;
714
715 /* Make things easier for our callers, including missing operands. */
716 if (a == b)
717 return true;
718 if (a == NULL || b == NULL)
719 return false;
720
721 /* Obviously, the opcodes must match. */
722 opc = a->dw_cfi_opc;
723 if (opc != b->dw_cfi_opc)
724 return false;
725
726 /* Compare the two operands, re-using the type of the operands as
727 already exposed elsewhere. */
728 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
729 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
730 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
731 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
732 }
733
734 /* Determine if two CFI_ROW structures are identical. */
735
736 static bool
737 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
738 {
739 size_t i, n_a, n_b, n_max;
740
741 if (a->cfa_cfi)
742 {
743 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
744 return false;
745 }
746 else if (!cfa_equal_p (&a->cfa, &b->cfa))
747 return false;
748
749 n_a = vec_safe_length (a->reg_save);
750 n_b = vec_safe_length (b->reg_save);
751 n_max = MAX (n_a, n_b);
752
753 for (i = 0; i < n_max; ++i)
754 {
755 dw_cfi_ref r_a = NULL, r_b = NULL;
756
757 if (i < n_a)
758 r_a = (*a->reg_save)[i];
759 if (i < n_b)
760 r_b = (*b->reg_save)[i];
761
762 if (!cfi_equal_p (r_a, r_b))
763 return false;
764 }
765
766 return true;
767 }
768
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  The branches below are ordered from the
   cheapest encoding to the most general one.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  HOST_WIDE_INT const_offset;
  if (new_cfa->reg == old_cfa->reg
      && !new_cfa->indirect
      && !old_cfa->indirect
      && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = const_offset;
    }
  else if (new_cfa->offset.is_constant ()
	   && known_eq (new_cfa->offset, old_cfa->offset)
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  This requires the old CFA to have
	 been set as a register plus offset rather than a general
	 DW_CFA_def_cfa_expression.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0
	   && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
      if (!new_cfa->offset.is_constant ()
	  || !new_cfa->base_offset.is_constant ())
	/* It's hard to reconstruct the CFA location for a polynomial
	   expression, so just cache it instead.  */
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = copy_cfa (new_cfa);
      else
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = NULL;
    }

  return cfi;
}
850
851 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
852
853 static void
854 def_cfa_1 (dw_cfa_location *new_cfa)
855 {
856 dw_cfi_ref cfi;
857
858 if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
859 cur_trace->cfa_store.offset = new_cfa->offset;
860
861 cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
862 if (cfi)
863 {
864 cur_row->cfa = *new_cfa;
865 cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
866 ? cfi : NULL);
867
868 add_cfi (cfi);
869 }
870 }
871
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  The resulting CFI is also recorded in
   CUR_ROW->reg_save.  */

static void
reg_save (unsigned int reg, unsigned int sreg, poly_int64 offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  if (sreg == INVALID_REGNUM)
    {
      HOST_WIDE_INT const_offset;
      /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
      if (fde && fde->stack_realign)
	{
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_aligned_loc (&cur_row->cfa, offset,
				     fde->stack_realignment);
	}
      else if (offset.is_constant (&const_offset))
	{
	  /* Pick the most compact opcode that can express the offset:
	     signed extended form, unsigned extended form, or the short
	     form with the column folded into the opcode (reg < 64).  */
	  if (need_data_align_sf_opcode (const_offset))
	    cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
	  else if (reg & ~0x3f)
	    cfi->dw_cfi_opc = DW_CFA_offset_extended;
	  else
	    cfi->dw_cfi_opc = DW_CFA_offset;
	  cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
	}
      else
	{
	  /* Polynomial offset: fall back to a full location expression.  */
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_loc (&cur_row->cfa, offset);
	}
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
931
932 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
933 and adjust data structures to match. */
934
935 static void
936 notice_args_size (rtx_insn *insn)
937 {
938 poly_int64 args_size, delta;
939 rtx note;
940
941 note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
942 if (note == NULL)
943 return;
944
945 args_size = get_args_size (note);
946 delta = args_size - cur_trace->end_true_args_size;
947 if (known_eq (delta, 0))
948 return;
949
950 cur_trace->end_true_args_size = args_size;
951
952 /* If the CFA is computed off the stack pointer, then we must adjust
953 the computation of the CFA as well. */
954 if (cur_cfa->reg == dw_stack_pointer_regnum)
955 {
956 gcc_assert (!cur_cfa->indirect);
957
958 /* Convert a change in args_size (always a positive in the
959 direction of stack growth) to a change in stack pointer. */
960 if (!STACK_GROWS_DOWNWARD)
961 delta = -delta;
962
963 cur_cfa->offset += delta;
964 }
965 }
966
967 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
968 data within the trace related to EH insns and args_size. */
969
970 static void
971 notice_eh_throw (rtx_insn *insn)
972 {
973 poly_int64 args_size = cur_trace->end_true_args_size;
974 if (cur_trace->eh_head == NULL)
975 {
976 cur_trace->eh_head = insn;
977 cur_trace->beg_delay_args_size = args_size;
978 cur_trace->end_delay_args_size = args_size;
979 }
980 else if (maybe_ne (cur_trace->end_delay_args_size, args_size))
981 {
982 cur_trace->end_delay_args_size = args_size;
983
984 /* ??? If the CFA is the stack pointer, search backward for the last
985 CFI note and insert there. Given that the stack changed for the
986 args_size change, there *must* be such a note in between here and
987 the last eh insn. */
988 add_cfi_args_size (args_size);
989 }
990 }
991
992 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
993 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
994 used in places where rtl is prohibited. */
995
996 static inline unsigned
997 dwf_regno (const_rtx reg)
998 {
999 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
1000 return DWARF_FRAME_REGNUM (REGNO (reg));
1001 }
1002
1003 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1004
1005 static bool
1006 compare_reg_or_pc (rtx x, rtx y)
1007 {
1008 if (REG_P (x) && REG_P (y))
1009 return REGNO (x) == REGNO (y);
1010 return x == y;
1011 }
1012
1013 /* Record SRC as being saved in DEST. DEST may be null to delete an
1014 existing entry. SRC may be a register or PC_RTX. */
1015
1016 static void
1017 record_reg_saved_in_reg (rtx dest, rtx src)
1018 {
1019 reg_saved_in_data *elt;
1020 size_t i;
1021
1022 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
1023 if (compare_reg_or_pc (elt->orig_reg, src))
1024 {
1025 if (dest == NULL)
1026 cur_trace->regs_saved_in_regs.unordered_remove (i);
1027 else
1028 elt->saved_in_reg = dest;
1029 return;
1030 }
1031
1032 if (dest == NULL)
1033 return;
1034
1035 reg_saved_in_data e = {src, dest};
1036 cur_trace->regs_saved_in_regs.safe_push (e);
1037 }
1038
1039 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1040 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1041
1042 static void
1043 queue_reg_save (rtx reg, rtx sreg, poly_int64 offset)
1044 {
1045 queued_reg_save *q;
1046 queued_reg_save e = {reg, sreg, offset};
1047 size_t i;
1048
1049 /* Duplicates waste space, but it's also necessary to remove them
1050 for correctness, since the queue gets output in reverse order. */
1051 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1052 if (compare_reg_or_pc (q->reg, reg))
1053 {
1054 *q = e;
1055 return;
1056 }
1057
1058 queued_reg_saves.safe_push (e);
1059 }
1060
1061 /* Output all the entries in QUEUED_REG_SAVES. */
1062
1063 static void
1064 dwarf2out_flush_queued_reg_saves (void)
1065 {
1066 queued_reg_save *q;
1067 size_t i;
1068
1069 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1070 {
1071 unsigned int reg, sreg;
1072
1073 record_reg_saved_in_reg (q->saved_reg, q->reg);
1074
1075 if (q->reg == pc_rtx)
1076 reg = DWARF_FRAME_RETURN_COLUMN;
1077 else
1078 reg = dwf_regno (q->reg);
1079 if (q->saved_reg)
1080 sreg = dwf_regno (q->saved_reg);
1081 else
1082 sreg = INVALID_REGNUM;
1083 reg_save (reg, sreg, q->cfa_offset);
1084 }
1085
1086 queued_reg_saves.truncate (0);
1087 }
1088
1089 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1090 location for? Or, does it clobber a register which we've previously
1091 said that some other register is saved in, and for which we now
1092 have a new location for? */
1093
1094 static bool
1095 clobbers_queued_reg_save (const_rtx insn)
1096 {
1097 queued_reg_save *q;
1098 size_t iq;
1099
1100 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1101 {
1102 size_t ir;
1103 reg_saved_in_data *rir;
1104
1105 if (modified_in_p (q->reg, insn))
1106 return true;
1107
1108 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1109 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1110 && modified_in_p (rir->saved_in_reg, insn))
1111 return true;
1112 }
1113
1114 return false;
1115 }
1116
1117 /* What register, if any, is currently saved in REG? */
1118
1119 static rtx
1120 reg_saved_in (rtx reg)
1121 {
1122 unsigned int regn = REGNO (reg);
1123 queued_reg_save *q;
1124 reg_saved_in_data *rir;
1125 size_t i;
1126
1127 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1128 if (q->saved_reg && regn == REGNO (q->saved_reg))
1129 return q->reg;
1130
1131 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1132 if (regn == REGNO (rir->saved_in_reg))
1133 return rir->orig_reg;
1134
1135 return NULL_RTX;
1136 }
1137
1138 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1139
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* PAT completely redefines the CFA rule; start from a clean slate.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* PAT is a REG or MEM, possibly wrapped in a PLUS with a constant.  */
  pat = strip_offset (pat, &cur_cfa->offset);
  if (MEM_P (pat))
    {
      /* The CFA is loaded from memory: record the indirection and the
	 offset used to form the address.  */
      cur_cfa->indirect = 1;
      pat = strip_offset (XEXP (pat, 0), &cur_cfa->base_offset);
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
1156
1157 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1158
1159 static void
1160 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1161 {
1162 rtx src, dest;
1163
1164 gcc_assert (GET_CODE (pat) == SET);
1165 dest = XEXP (pat, 0);
1166 src = XEXP (pat, 1);
1167
1168 switch (GET_CODE (src))
1169 {
1170 case PLUS:
1171 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1172 cur_cfa->offset -= rtx_to_poly_int64 (XEXP (src, 1));
1173 break;
1174
1175 case REG:
1176 break;
1177
1178 default:
1179 gcc_unreachable ();
1180 }
1181
1182 cur_cfa->reg = dwf_regno (dest);
1183 gcc_assert (cur_cfa->indirect == 0);
1184 }
1185
1186 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1187
1188 static void
1189 dwarf2out_frame_debug_cfa_offset (rtx set)
1190 {
1191 poly_int64 offset;
1192 rtx src, addr, span;
1193 unsigned int sregno;
1194
1195 src = XEXP (set, 1);
1196 addr = XEXP (set, 0);
1197 gcc_assert (MEM_P (addr));
1198 addr = XEXP (addr, 0);
1199
1200 /* As documented, only consider extremely simple addresses. */
1201 switch (GET_CODE (addr))
1202 {
1203 case REG:
1204 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1205 offset = -cur_cfa->offset;
1206 break;
1207 case PLUS:
1208 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1209 offset = rtx_to_poly_int64 (XEXP (addr, 1)) - cur_cfa->offset;
1210 break;
1211 default:
1212 gcc_unreachable ();
1213 }
1214
1215 if (src == pc_rtx)
1216 {
1217 span = NULL;
1218 sregno = DWARF_FRAME_RETURN_COLUMN;
1219 }
1220 else
1221 {
1222 span = targetm.dwarf_register_span (src);
1223 sregno = dwf_regno (src);
1224 }
1225
1226 /* ??? We'd like to use queue_reg_save, but we need to come up with
1227 a different flushing heuristic for epilogues. */
1228 if (!span)
1229 reg_save (sregno, INVALID_REGNUM, offset);
1230 else
1231 {
1232 /* We have a PARALLEL describing where the contents of SRC live.
1233 Adjust the offset for each piece of the PARALLEL. */
1234 poly_int64 span_offset = offset;
1235
1236 gcc_assert (GET_CODE (span) == PARALLEL);
1237
1238 const int par_len = XVECLEN (span, 0);
1239 for (int par_index = 0; par_index < par_len; par_index++)
1240 {
1241 rtx elem = XVECEXP (span, 0, par_index);
1242 sregno = dwf_regno (src);
1243 reg_save (sregno, INVALID_REGNUM, span_offset);
1244 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1245 }
1246 }
1247 }
1248
1249 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1250
1251 static void
1252 dwarf2out_frame_debug_cfa_register (rtx set)
1253 {
1254 rtx src, dest;
1255 unsigned sregno, dregno;
1256
1257 src = XEXP (set, 1);
1258 dest = XEXP (set, 0);
1259
1260 record_reg_saved_in_reg (dest, src);
1261 if (src == pc_rtx)
1262 sregno = DWARF_FRAME_RETURN_COLUMN;
1263 else
1264 sregno = dwf_regno (src);
1265
1266 dregno = dwf_regno (dest);
1267
1268 /* ??? We'd like to use queue_reg_save, but we need to come up with
1269 a different flushing heuristic for epilogues. */
1270 reg_save (sregno, dregno, 0);
1271 }
1272
1273 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1274
1275 static void
1276 dwarf2out_frame_debug_cfa_expression (rtx set)
1277 {
1278 rtx src, dest, span;
1279 dw_cfi_ref cfi = new_cfi ();
1280 unsigned regno;
1281
1282 dest = SET_DEST (set);
1283 src = SET_SRC (set);
1284
1285 gcc_assert (REG_P (src));
1286 gcc_assert (MEM_P (dest));
1287
1288 span = targetm.dwarf_register_span (src);
1289 gcc_assert (!span);
1290
1291 regno = dwf_regno (src);
1292
1293 cfi->dw_cfi_opc = DW_CFA_expression;
1294 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1295 cfi->dw_cfi_oprnd2.dw_cfi_loc
1296 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1297 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1298
1299 /* ??? We'd like to use queue_reg_save, were the interface different,
1300 and, as above, we could manage flushing for epilogues. */
1301 add_cfi (cfi);
1302 update_row_reg_save (cur_row, regno, cfi);
1303 }
1304
1305 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1306 note. */
1307
1308 static void
1309 dwarf2out_frame_debug_cfa_val_expression (rtx set)
1310 {
1311 rtx dest = SET_DEST (set);
1312 gcc_assert (REG_P (dest));
1313
1314 rtx span = targetm.dwarf_register_span (dest);
1315 gcc_assert (!span);
1316
1317 rtx src = SET_SRC (set);
1318 dw_cfi_ref cfi = new_cfi ();
1319 cfi->dw_cfi_opc = DW_CFA_val_expression;
1320 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
1321 cfi->dw_cfi_oprnd2.dw_cfi_loc
1322 = mem_loc_descriptor (src, GET_MODE (src),
1323 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1324 add_cfi (cfi);
1325 update_row_reg_save (cur_row, dwf_regno (dest), cfi);
1326 }
1327
1328 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1329
1330 static void
1331 dwarf2out_frame_debug_cfa_restore (rtx reg)
1332 {
1333 gcc_assert (REG_P (reg));
1334
1335 rtx span = targetm.dwarf_register_span (reg);
1336 if (!span)
1337 {
1338 unsigned int regno = dwf_regno (reg);
1339 add_cfi_restore (regno);
1340 update_row_reg_save (cur_row, regno, NULL);
1341 }
1342 else
1343 {
1344 /* We have a PARALLEL describing where the contents of REG live.
1345 Restore the register for each piece of the PARALLEL. */
1346 gcc_assert (GET_CODE (span) == PARALLEL);
1347
1348 const int par_len = XVECLEN (span, 0);
1349 for (int par_index = 0; par_index < par_len; par_index++)
1350 {
1351 reg = XVECEXP (span, 0, par_index);
1352 gcc_assert (REG_P (reg));
1353 unsigned int regno = dwf_regno (reg);
1354 add_cfi_restore (regno);
1355 update_row_reg_save (cur_row, regno, NULL);
1356 }
1357 }
1358 }
1359
1360 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1361 ??? Perhaps we should note in the CIE where windows are saved (instead of
1362 assuming 0(cfa)) and what registers are in the window. */
1363
1364 static void
1365 dwarf2out_frame_debug_cfa_window_save (void)
1366 {
1367 dw_cfi_ref cfi = new_cfi ();
1368
1369 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1370 add_cfi (cfi);
1371 }
1372
1373 /* Record call frame debugging information for an expression EXPR,
1374 which either sets SP or FP (adjusting how we calculate the frame
1375 address) or saves a register to the stack or another register.
1376 LABEL indicates the address of EXPR.
1377
1378 This function encodes a state machine mapping rtxes to actions on
1379 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1380 users need not read the source code.
1381
1382 The High-Level Picture
1383
1384 Changes in the register we use to calculate the CFA: Currently we
1385 assume that if you copy the CFA register into another register, we
1386 should take the other one as the new CFA register; this seems to
1387 work pretty well. If it's wrong for some target, it's simple
1388 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1389
1390 Changes in the register we use for saving registers to the stack:
1391 This is usually SP, but not always. Again, we deduce that if you
1392 copy SP into another register (and SP is not the CFA register),
1393 then the new register is the one we will be using for register
1394 saves. This also seems to work.
1395
1396 Register saves: There's not much guesswork about this one; if
1397 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1398 register save, and the register used to calculate the destination
1399 had better be the one we think we're using for this purpose.
1400 It's also assumed that a copy from a call-saved register to another
1401 register is saving that register if RTX_FRAME_RELATED_P is set on
1402 that instruction. If the copy is from a call-saved register to
1403 the *same* register, that means that the register is now the same
1404 value as in the caller.
1405
1406 Except: If the register being saved is the CFA register, and the
1407 offset is nonzero, we are saving the CFA, so we assume we have to
1408 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1409 the intent is to save the value of SP from the previous frame.
1410
   In addition, if a register has previously been saved to a different
   register, a later save of that second register is treated as a save
   of the original register (see reg_saved_in).
1413
1414 Invariants / Summaries of Rules
1415
1416 cfa current rule for calculating the CFA. It usually
1417 consists of a register and an offset. This is
1418 actually stored in *cur_cfa, but abbreviated
1419 for the purposes of this documentation.
1420 cfa_store register used by prologue code to save things to the stack
1421 cfa_store.offset is the offset from the value of
1422 cfa_store.reg to the actual CFA
1423 cfa_temp register holding an integral value. cfa_temp.offset
1424 stores the value, which will be used to adjust the
1425 stack pointer. cfa_temp is also used like cfa_store,
1426 to track stores to the stack via fp or a temp reg.
1427
1428 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1429 with cfa.reg as the first operand changes the cfa.reg and its
1430 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1431 cfa_temp.offset.
1432
1433 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1434 expression yielding a constant. This sets cfa_temp.reg
1435 and cfa_temp.offset.
1436
1437 Rule 5: Create a new register cfa_store used to save items to the
1438 stack.
1439
1440 Rules 10-14: Save a register to the stack. Define offset as the
1441 difference of the original location and cfa_store's
1442 location (or cfa_temp's location if cfa_temp is used).
1443
1444 Rules 16-20: If AND operation happens on sp in prologue, we assume
1445 stack is realigned. We will use a group of DW_OP_XXX
1446 expressions to represent the location of the stored
1447 register instead of CFA+offset.
1448
1449 The Rules
1450
1451 "{a,b}" indicates a choice of a xor b.
1452 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1453
1454 Rule 1:
1455 (set <reg1> <reg2>:cfa.reg)
1456 effects: cfa.reg = <reg1>
1457 cfa.offset unchanged
1458 cfa_temp.reg = <reg1>
1459 cfa_temp.offset = cfa.offset
1460
1461 Rule 2:
1462 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1463 {<const_int>,<reg>:cfa_temp.reg}))
1464 effects: cfa.reg = sp if fp used
1465 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1466 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1467 if cfa_store.reg==sp
1468
1469 Rule 3:
1470 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1471 effects: cfa.reg = fp
1472 cfa_offset += +/- <const_int>
1473
1474 Rule 4:
1475 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1476 constraints: <reg1> != fp
1477 <reg1> != sp
1478 effects: cfa.reg = <reg1>
1479 cfa_temp.reg = <reg1>
1480 cfa_temp.offset = cfa.offset
1481
1482 Rule 5:
1483 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1484 constraints: <reg1> != fp
1485 <reg1> != sp
1486 effects: cfa_store.reg = <reg1>
1487 cfa_store.offset = cfa.offset - cfa_temp.offset
1488
1489 Rule 6:
1490 (set <reg> <const_int>)
1491 effects: cfa_temp.reg = <reg>
1492 cfa_temp.offset = <const_int>
1493
1494 Rule 7:
1495 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1496 effects: cfa_temp.reg = <reg1>
1497 cfa_temp.offset |= <const_int>
1498
1499 Rule 8:
1500 (set <reg> (high <exp>))
1501 effects: none
1502
1503 Rule 9:
1504 (set <reg> (lo_sum <exp> <const_int>))
1505 effects: cfa_temp.reg = <reg>
1506 cfa_temp.offset = <const_int>
1507
1508 Rule 10:
1509 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1510 effects: cfa_store.offset -= <const_int>
1511 cfa.offset = cfa_store.offset if cfa.reg == sp
1512 cfa.reg = sp
1513 cfa.base_offset = -cfa_store.offset
1514
1515 Rule 11:
1516 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1517 effects: cfa_store.offset += -/+ mode_size(mem)
1518 cfa.offset = cfa_store.offset if cfa.reg == sp
1519 cfa.reg = sp
1520 cfa.base_offset = -cfa_store.offset
1521
1522 Rule 12:
1523 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1524
1525 <reg2>)
1526 effects: cfa.reg = <reg1>
1527 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1528
1529 Rule 13:
1530 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1531 effects: cfa.reg = <reg1>
1532 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1533
1534 Rule 14:
1535 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1536 effects: cfa.reg = <reg1>
1537 cfa.base_offset = -cfa_temp.offset
1538 cfa_temp.offset -= mode_size(mem)
1539
1540 Rule 15:
1541 (set <reg> {unspec, unspec_volatile})
1542 effects: target-dependent
1543
1544 Rule 16:
1545 (set sp (and: sp <const_int>))
1546 constraints: cfa_store.reg == sp
1547 effects: cfun->fde.stack_realign = 1
1548 cfa_store.offset = 0
1549 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1550
1551 Rule 17:
1552 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1553 effects: cfa_store.offset += -/+ mode_size(mem)
1554
1555 Rule 18:
1556 (set (mem ({pre_inc, pre_dec} sp)) fp)
1557 constraints: fde->stack_realign == 1
1558 effects: cfa_store.offset = 0
1559 cfa.reg != HARD_FRAME_POINTER_REGNUM
1560
1561 Rule 19:
1562 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1563 constraints: fde->stack_realign == 1
1564 && cfa.offset == 0
1565 && cfa.indirect == 0
1566 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1567 effects: Use DW_CFA_def_cfa_expression to define cfa
1568 cfa.reg == fde->drap_reg */
1569
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  poly_int64 offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  if (REG_P (src))
    {
      /* If SRC holds a saved copy of another register, attribute this
	 store to the original register instead.  */
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      if (REG_P (XEXP (src, 1)))
		{
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		}
	      else if (!poly_int_rtx_p (XEXP (src, 1), &offset))
		gcc_unreachable ();

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
	      offset = rtx_to_poly_int64 (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && poly_int_rtx_p (XEXP (src, 1), &offset))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = -offset;
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && poly_int_rtx_p (XEXP (src, 1),
					  &cur_trace->cfa_temp.offset))
		cur_trace->cfa_temp.reg = dwf_regno (dest);
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	case POLY_INT_CST:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = rtx_to_poly_int64 (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  if (!can_ior_p (cur_trace->cfa_temp.offset, INTVAL (XEXP (src, 1)),
			  &cur_trace->cfa_temp.offset))
	    /* The target shouldn't generate this kind of CFI note if we
	       can't represent it.  */
	    gcc_unreachable ();
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  offset = -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* Compute the save slot's offset from the CFA; for POST_MODIFY
	     the store happens at the pre-modification address.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* For POST_DEC the store happens at the pre-decrement address,
	     i.e. mode_size above the new SP.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = rtx_to_poly_int64 (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Convert the base-relative offset to a CFA-relative one,
	       depending on which tracked register is the base.  */
	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (known_eq (cur_cfa->offset, 0))
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  poly_int64 span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2032
2033 /* Record call frame debugging information for INSN, which either sets
2034 SP or FP (adjusting how we calculate the frame address) or saves a
2035 register to the stack. */
2036
static void
dwarf2out_frame_debug (rtx_insn *insn)
{
  rtx note, n, pat;
  bool handled_one = false;

  /* REG_CFA_* notes override the default interpretation of the insn's
     pattern; only if none is present do we fall back to
     dwarf2out_frame_debug_expr on the pattern itself.  */
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      case REG_FRAME_RELATED_EXPR:
	/* Process the attached expression in place of the pattern.  */
	pat = XEXP (note, 0);
	goto do_frame_expr;

      case REG_CFA_DEF_CFA:
	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
	handled_one = true;
	break;

      case REG_CFA_ADJUST_CFA:
	n = XEXP (note, 0);
	/* A note with no argument means: use the first SET of the
	   insn's pattern.  */
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_adjust_cfa (n);
	handled_one = true;
	break;

      case REG_CFA_OFFSET:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);
	dwarf2out_frame_debug_cfa_offset (n);
	handled_one = true;
	break;

      case REG_CFA_REGISTER:
	n = XEXP (note, 0);
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	  }
	dwarf2out_frame_debug_cfa_register (n);
	handled_one = true;
	break;

      case REG_CFA_EXPRESSION:
      case REG_CFA_VAL_EXPRESSION:
	n = XEXP (note, 0);
	if (n == NULL)
	  n = single_set (insn);

	if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
	  dwarf2out_frame_debug_cfa_expression (n);
	else
	  dwarf2out_frame_debug_cfa_val_expression (n);

	handled_one = true;
	break;

      case REG_CFA_RESTORE:
	n = XEXP (note, 0);
	/* Default to the destination of the first SET of the insn.  */
	if (n == NULL)
	  {
	    n = PATTERN (insn);
	    if (GET_CODE (n) == PARALLEL)
	      n = XVECEXP (n, 0, 0);
	    n = XEXP (n, 0);
	  }
	dwarf2out_frame_debug_cfa_restore (n);
	handled_one = true;
	break;

      case REG_CFA_SET_VDRAP:
	n = XEXP (note, 0);
	if (REG_P (n))
	  {
	    dw_fde_ref fde = cfun->fde;
	    if (fde)
	      {
		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
		if (REG_P (n))
		  fde->vdrap_reg = dwf_regno (n);
	      }
	  }
	handled_one = true;
	break;

      case REG_CFA_TOGGLE_RA_MANGLE:
      case REG_CFA_WINDOW_SAVE:
	/* We overload both of these operations onto the same DWARF opcode.  */
	dwarf2out_frame_debug_cfa_window_save ();
	handled_one = true;
	break;

      case REG_CFA_FLUSH_QUEUE:
	/* The actual flush happens elsewhere.  */
	handled_one = true;
	break;

      default:
	break;
      }

  if (!handled_one)
    {
      pat = PATTERN (insn);
    do_frame_expr:
      dwarf2out_frame_debug_expr (pat);

      /* Check again.  A parallel can save and update the same register.
	 We could probably check just once, here, but this is safer than
	 removing the check at the start of the function.  */
      if (clobbers_queued_reg_save (pat))
	dwarf2out_flush_queued_reg_saves ();
    }
}
2158
2159 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2160
2161 static void
2162 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2163 {
2164 size_t i, n_old, n_new, n_max;
2165 dw_cfi_ref cfi;
2166
2167 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2168 add_cfi (new_row->cfa_cfi);
2169 else
2170 {
2171 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2172 if (cfi)
2173 add_cfi (cfi);
2174 }
2175
2176 n_old = vec_safe_length (old_row->reg_save);
2177 n_new = vec_safe_length (new_row->reg_save);
2178 n_max = MAX (n_old, n_new);
2179
2180 for (i = 0; i < n_max; ++i)
2181 {
2182 dw_cfi_ref r_old = NULL, r_new = NULL;
2183
2184 if (i < n_old)
2185 r_old = (*old_row->reg_save)[i];
2186 if (i < n_new)
2187 r_new = (*new_row->reg_save)[i];
2188
2189 if (r_old == r_new)
2190 ;
2191 else if (r_new == NULL)
2192 add_cfi_restore (i);
2193 else if (!cfi_equal_p (r_old, r_new))
2194 add_cfi (r_new);
2195 }
2196 }
2197
2198 /* Examine CFI and return true if a cfi label and set_loc is needed
2199 beforehand. Even when generating CFI assembler instructions, we
2200 still have to add the cfi to the list so that lookup_cfa_1 works
2201 later on. When -g2 and above we even need to force emitting of
2202 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2203 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2204 and so don't use convert_cfa_to_fb_loc_list. */
2205
2206 static bool
2207 cfi_label_required_p (dw_cfi_ref cfi)
2208 {
2209 if (!dwarf2out_do_cfi_asm ())
2210 return true;
2211
2212 if (dwarf_version == 2
2213 && debug_info_level > DINFO_LEVEL_TERSE
2214 && (write_symbols == DWARF2_DEBUG
2215 || write_symbols == VMS_AND_DWARF2_DEBUG))
2216 {
2217 switch (cfi->dw_cfi_opc)
2218 {
2219 case DW_CFA_def_cfa_offset:
2220 case DW_CFA_def_cfa_offset_sf:
2221 case DW_CFA_def_cfa_register:
2222 case DW_CFA_def_cfa:
2223 case DW_CFA_def_cfa_sf:
2224 case DW_CFA_def_cfa_expression:
2225 case DW_CFA_restore_state:
2226 return true;
2227 default:
2228 return false;
2229 }
2230 }
2231 return false;
2232 }
2233
2234 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2235 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2236 necessary. */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* Record where the CFIs for the cold section start; presumably
	 consumed when a second FDE is begun at the section switch —
	 see dwarf2out_switch_text_section (TODO confirm).  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Look ahead over the entire run of consecutive CFI notes
	     (skipping non-active insns) so the whole run shares one
	     label if any member of the run requires one.  NEXT ends up
	     just past the run.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Leave a note so that final emits the label itself at
		 this position in the insn stream.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI of the scanned run to the FDE.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	}
    }
}
2291
2292 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2293
2294 /* If LABEL is the start of a trace, then initialize the state of that
2295 trace from CUR_TRACE and CUR_ROW. */
2296
static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;

  /* START must be the head of a trace recorded by create_pseudo_cfg.  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "  saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  poly_int64 args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      /* Copy, don't share: the target trace mutates its own vector.  */
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (maybe_ne (ti->beg_true_args_size, args_size))
	ti->args_size_undefined = true;
    }
}
2357
2358 /* Similarly, but handle the args_size and CFA reset across EH
2359 and non-local goto edges. */
2360
static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  poly_int64 save_args_size, delta;
  dw_cfa_location save_cfa;

  /* An EH or non-local-goto edge sees args_size of zero at the target;
     if nothing is pending anyway, the normal propagation suffices.  */
  save_args_size = cur_trace->end_true_args_size;
  if (known_eq (save_args_size, 0))
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  /* Record the edge with the zeroed args_size and adjusted CFA ...  */
  maybe_record_trace_start (start, origin);

  /* ... then restore the state for the current (fallthru) path.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2393
2394 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2395 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2396
static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled from the CALL_P side below.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* Every label in the jump table is a potential trace head.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump can reach any label whose address was
	     forced into a register (forced_labels).  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	/* A return leaves the function; no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* A plain (conditional) jump with a single target label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* A SEQUENCE wraps a branch and its delay slots; recurse on
	 each member.  Skip the shared EH handling below — it is done
	 per member on the recursive calls.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2476
2477 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2478
static void
scan_insn_after (rtx_insn *insn)
{
  /* Frame-related insns modify the CFI row state; record the effect.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  /* Update args_size tracking for INSN (see notice_args_size).  */
  notice_args_size (insn);
}
2486
2487 /* Scan the trace beginning at INSN and create the CFI notes for the
2488 instructions therein. */
2489
static void
scan_trace (dw_trace_info *trace, bool entry)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The trace ends with a copy of its incoming row state, which the
     scan below then mutates insn by insn.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  /* Publish the state through the file-scoped cursors that the
     dwarf2out_frame_debug* helpers operate on.  */
  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  /* If the current function starts with a non-standard incoming frame
     sp offset, emit a note before the first instruction.  */
  if (entry
      && DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET)
    {
      add_cfi_insn = insn;
      gcc_assert (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED);
      this_cfa.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&this_cfa);
    }

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  /* Element 0 of a SEQUENCE is the branch/call; the remaining
	     elements are its delay-slot insns.  */
	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  poly_int64 restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects for the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the file-scoped cursors again.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2676
2677 /* Scan the function and create the initial set of CFI notes. */
2678
2679 static void
2680 create_cfi_notes (void)
2681 {
2682 dw_trace_info *ti;
2683
2684 gcc_checking_assert (!queued_reg_saves.exists ());
2685 gcc_checking_assert (!trace_work_list.exists ());
2686
2687 /* Always begin at the entry trace. */
2688 ti = &trace_info[0];
2689 scan_trace (ti, true);
2690
2691 while (!trace_work_list.is_empty ())
2692 {
2693 ti = trace_work_list.pop ();
2694 scan_trace (ti, false);
2695 }
2696
2697 queued_reg_saves.release ();
2698 trace_work_list.release ();
2699 }
2700
2701 /* Return the insn before the first NOTE_INSN_CFI after START. */
2702
2703 static rtx_insn *
2704 before_next_cfi_note (rtx_insn *start)
2705 {
2706 rtx_insn *prev = start;
2707 while (start)
2708 {
2709 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2710 return prev;
2711 prev = start;
2712 start = NEXT_INSN (start);
2713 }
2714 gcc_unreachable ();
2715 }
2716
2717 /* Insert CFI notes between traces to properly change state between them. */
2718
static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* After the restore, the effective previous state is the
		 remembered one, i.e. the start of the previous trace.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      poly_int64 prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* The alternate section starts a new FDE (see above), so the
	     running args_size restarts at zero there.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (maybe_ne (ti->beg_delay_args_size, prev_args_size))
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2843
2844 /* Set up the pseudo-cfg of instruction traces, as described at the
2845 block comment at the top of the file. */
2846
2847 static void
2848 create_pseudo_cfg (void)
2849 {
2850 bool saw_barrier, switch_sections;
2851 dw_trace_info ti;
2852 rtx_insn *insn;
2853 unsigned i;
2854
2855 /* The first trace begins at the start of the function,
2856 and begins with the CIE row state. */
2857 trace_info.create (16);
2858 memset (&ti, 0, sizeof (ti));
2859 ti.head = get_insns ();
2860 ti.beg_row = cie_cfi_row;
2861 ti.cfa_store = cie_cfi_row->cfa;
2862 ti.cfa_temp.reg = INVALID_REGNUM;
2863 trace_info.quick_push (ti);
2864
2865 if (cie_return_save)
2866 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2867
2868 /* Walk all the insns, collecting start of trace locations. */
2869 saw_barrier = false;
2870 switch_sections = false;
2871 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2872 {
2873 if (BARRIER_P (insn))
2874 saw_barrier = true;
2875 else if (NOTE_P (insn)
2876 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2877 {
2878 /* We should have just seen a barrier. */
2879 gcc_assert (saw_barrier);
2880 switch_sections = true;
2881 }
2882 /* Watch out for save_point notes between basic blocks.
2883 In particular, a note after a barrier. Do not record these,
2884 delaying trace creation until the label. */
2885 else if (save_point_p (insn)
2886 && (LABEL_P (insn) || !saw_barrier))
2887 {
2888 memset (&ti, 0, sizeof (ti));
2889 ti.head = insn;
2890 ti.switch_sections = switch_sections;
2891 ti.id = trace_info.length ();
2892 trace_info.safe_push (ti);
2893
2894 saw_barrier = false;
2895 switch_sections = false;
2896 }
2897 }
2898
2899 /* Create the trace index after we've finished building trace_info,
2900 avoiding stale pointer problems due to reallocation. */
2901 trace_index
2902 = new hash_table<trace_info_hasher> (trace_info.length ());
2903 dw_trace_info *tp;
2904 FOR_EACH_VEC_ELT (trace_info, i, tp)
2905 {
2906 dw_trace_info **slot;
2907
2908 if (dump_file)
2909 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2910 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2911 tp->switch_sections ? " (section switch)" : "");
2912
2913 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2914 gcc_assert (*slot == NULL);
2915 *slot = tp;
2916 }
2917 }
2918
2919 /* Record the initial position of the return address. RTL is
2920 INCOMING_RETURN_ADDR_RTX. */
2921
static void
initial_return_save (rtx rtl)
{
  /* REG stays INVALID_REGNUM unless the return address is in a
     register; OFFSET is only meaningful for the stack (MEM) case.  */
  unsigned int reg = INVALID_REGNUM;
  poly_int64 offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decompose the address into sp + offset.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  /* If the RA is not already in its DWARF return column, record where
     it actually is: a register-in-register save plus a save rule for
     the return column relative to the CFA.  */
  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2980
/* Build the CIE data shared by all FDEs in the translation unit:
   the initial CFA (at the incoming stack pointer) and the initial
   location of the return address.  Called once per TU.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so helpers such as record_reg_saved_in_reg
     have a cur_trace to operate on.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  /* CFIs produced here accumulate in the CIE vector, not an FDE.  */
  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  /* create_cie_data is called just once per TU, and when using .cfi_startproc
     is even done by the assembler rather than the compiler.  If the target
     has different incoming frame sp offsets depending on what kind of
     function it is, use a single constant offset for the target and
     if needed, adjust before the first instruction in insn stream.  */
  loc.offset = DEFAULT_INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Clear the cursors again; per-function processing resets them.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
3037
3038 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3039 state at each location within the function. These notes will be
3040 emitted during pass_final. */
3041
3042 static unsigned int
3043 execute_dwarf2_frame (void)
3044 {
3045 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3046 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
3047
3048 /* The first time we're called, compute the incoming frame state. */
3049 if (cie_cfi_vec == NULL)
3050 create_cie_data ();
3051
3052 dwarf2out_alloc_current_fde ();
3053
3054 create_pseudo_cfg ();
3055
3056 /* Do the work. */
3057 create_cfi_notes ();
3058 connect_traces ();
3059 add_cfis_to_fde ();
3060
3061 /* Free all the data we allocated. */
3062 {
3063 size_t i;
3064 dw_trace_info *ti;
3065
3066 FOR_EACH_VEC_ELT (trace_info, i, ti)
3067 ti->regs_saved_in_regs.release ();
3068 }
3069 trace_info.release ();
3070
3071 delete trace_index;
3072 trace_index = NULL;
3073
3074 return 0;
3075 }
3076 \f
3077 /* Convert a DWARF call frame info. operation to its string name */
3078
3079 static const char *
3080 dwarf_cfi_name (unsigned int cfi_opc)
3081 {
3082 const char *name = get_DW_CFA_name (cfi_opc);
3083
3084 if (name != NULL)
3085 return name;
3086
3087 return "DW_CFA_<unknown>";
3088 }
3089
3090 /* This routine will generate the correct assembly data for a location
3091 description based on a cfi entry with a complex address. */
3092
3093 static void
3094 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3095 {
3096 dw_loc_descr_ref loc;
3097 unsigned long size;
3098
3099 if (cfi->dw_cfi_opc == DW_CFA_expression
3100 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3101 {
3102 unsigned r =
3103 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3104 dw2_asm_output_data (1, r, NULL);
3105 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3106 }
3107 else
3108 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3109
3110 /* Output the size of the block. */
3111 size = size_of_locs (loc);
3112 dw2_asm_output_data_uleb128 (size, NULL);
3113
3114 /* Now output the operations themselves. */
3115 output_loc_sequence (loc, for_eh);
3116 }
3117
3118 /* Similar, but used for .cfi_escape. */
3119
3120 static void
3121 output_cfa_loc_raw (dw_cfi_ref cfi)
3122 {
3123 dw_loc_descr_ref loc;
3124 unsigned long size;
3125
3126 if (cfi->dw_cfi_opc == DW_CFA_expression
3127 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3128 {
3129 unsigned r =
3130 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3131 fprintf (asm_out_file, "%#x,", r);
3132 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3133 }
3134 else
3135 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3136
3137 /* Output the size of the block. */
3138 size = size_of_locs (loc);
3139 dw2_asm_output_data_uleb128_raw (size);
3140 fputc (',', asm_out_file);
3141
3142 /* Now output the operations themselves. */
3143 output_loc_sequence_raw (loc);
3144 }
3145
3146 /* Output a Call Frame Information opcode and its operand(s). */
3147
3148 void
3149 output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
3150 {
3151 unsigned long r;
3152 HOST_WIDE_INT off;
3153
3154 if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
3155 dw2_asm_output_data (1, (cfi->dw_cfi_opc
3156 | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
3157 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
3158 ((unsigned HOST_WIDE_INT)
3159 cfi->dw_cfi_oprnd1.dw_cfi_offset));
3160 else if (cfi->dw_cfi_opc == DW_CFA_offset)
3161 {
3162 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3163 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3164 "DW_CFA_offset, column %#lx", r);
3165 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3166 dw2_asm_output_data_uleb128 (off, NULL);
3167 }
3168 else if (cfi->dw_cfi_opc == DW_CFA_restore)
3169 {
3170 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3171 dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
3172 "DW_CFA_restore, column %#lx", r);
3173 }
3174 else
3175 {
3176 dw2_asm_output_data (1, cfi->dw_cfi_opc,
3177 "%s", dwarf_cfi_name (cfi->dw_cfi_opc));
3178
3179 switch (cfi->dw_cfi_opc)
3180 {
3181 case DW_CFA_set_loc:
3182 if (for_eh)
3183 dw2_asm_output_encoded_addr_rtx (
3184 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
3185 gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
3186 false, NULL);
3187 else
3188 dw2_asm_output_addr (DWARF2_ADDR_SIZE,
3189 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
3190 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3191 break;
3192
3193 case DW_CFA_advance_loc1:
3194 dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3195 fde->dw_fde_current_label, NULL);
3196 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3197 break;
3198
3199 case DW_CFA_advance_loc2:
3200 dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3201 fde->dw_fde_current_label, NULL);
3202 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3203 break;
3204
3205 case DW_CFA_advance_loc4:
3206 dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3207 fde->dw_fde_current_label, NULL);
3208 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3209 break;
3210
3211 case DW_CFA_MIPS_advance_loc8:
3212 dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
3213 fde->dw_fde_current_label, NULL);
3214 fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
3215 break;
3216
3217 case DW_CFA_offset_extended:
3218 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3219 dw2_asm_output_data_uleb128 (r, NULL);
3220 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3221 dw2_asm_output_data_uleb128 (off, NULL);
3222 break;
3223
3224 case DW_CFA_def_cfa:
3225 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3226 dw2_asm_output_data_uleb128 (r, NULL);
3227 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
3228 break;
3229
3230 case DW_CFA_offset_extended_sf:
3231 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3232 dw2_asm_output_data_uleb128 (r, NULL);
3233 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3234 dw2_asm_output_data_sleb128 (off, NULL);
3235 break;
3236
3237 case DW_CFA_def_cfa_sf:
3238 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3239 dw2_asm_output_data_uleb128 (r, NULL);
3240 off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
3241 dw2_asm_output_data_sleb128 (off, NULL);
3242 break;
3243
3244 case DW_CFA_restore_extended:
3245 case DW_CFA_undefined:
3246 case DW_CFA_same_value:
3247 case DW_CFA_def_cfa_register:
3248 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3249 dw2_asm_output_data_uleb128 (r, NULL);
3250 break;
3251
3252 case DW_CFA_register:
3253 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3254 dw2_asm_output_data_uleb128 (r, NULL);
3255 r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
3256 dw2_asm_output_data_uleb128 (r, NULL);
3257 break;
3258
3259 case DW_CFA_def_cfa_offset:
3260 case DW_CFA_GNU_args_size:
3261 dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
3262 break;
3263
3264 case DW_CFA_def_cfa_offset_sf:
3265 off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
3266 dw2_asm_output_data_sleb128 (off, NULL);
3267 break;
3268
3269 case DW_CFA_GNU_window_save:
3270 break;
3271
3272 case DW_CFA_def_cfa_expression:
3273 case DW_CFA_expression:
3274 case DW_CFA_val_expression:
3275 output_cfa_loc (cfi, for_eh);
3276 break;
3277
3278 case DW_CFA_GNU_negative_offset_extended:
3279 /* Obsoleted by DW_CFA_offset_extended_sf. */
3280 gcc_unreachable ();
3281
3282 default:
3283 break;
3284 }
3285 }
3286 }
3287
/* Similar to output_cfi, but emit the CFI as assembler (.cfi_*) directives
   instead of hand-encoding the bytes.  F is normally asm_out_file, but this
   routine is also used for debugging dumps, in which case F may be any
   stream (e.g. stderr); several cases below emit a readable pseudo-directive
   when F is not asm_out_file.

   Note that, unlike output_cfi, register numbers are always translated
   with the EH mapping (second argument of DWARF2_FRAME_REG_OUT is 1),
   and offsets are emitted raw, without div_data_align scaling.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No assembler directive exists for this opcode, so when emitting
	 real assembly encode the raw bytes via .cfi_escape; for dumps,
	 print a readable pseudo-directive instead.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      /* For dumps, just indicate which expression opcode this is; the
	 DWARF location expression itself is elided.  */
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      /* No directive takes a DWARF expression; encode the raw bytes.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3407
3408 void
3409 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3410 {
3411 if (dwarf2out_do_cfi_asm ())
3412 output_cfi_directive (asm_out_file, cfi);
3413 }
3414
3415 static void
3416 dump_cfi_row (FILE *f, dw_cfi_row *row)
3417 {
3418 dw_cfi_ref cfi;
3419 unsigned i;
3420
3421 cfi = row->cfa_cfi;
3422 if (!cfi)
3423 {
3424 dw_cfa_location dummy;
3425 memset (&dummy, 0, sizeof (dummy));
3426 dummy.reg = INVALID_REGNUM;
3427 cfi = def_cfa_0 (&dummy, &row->cfa);
3428 }
3429 output_cfi_directive (f, cfi);
3430
3431 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3432 if (cfi)
3433 output_cfi_directive (f, cfi);
3434 }
3435
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; intended to be called from the debugger.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3443 \f
3444
3445 /* Save the result of dwarf2out_do_frame across PCH.
3446 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3447 static GTY(()) signed char saved_do_cfi_asm = 0;
3448
3449 /* Decide whether to emit EH frame unwind information for the current
3450 translation unit. */
3451
3452 bool
3453 dwarf2out_do_eh_frame (void)
3454 {
3455 return
3456 (flag_unwind_tables || flag_exceptions)
3457 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
3458 }
3459
3460 /* Decide whether we want to emit frame unwind information for the current
3461 translation unit. */
3462
3463 bool
3464 dwarf2out_do_frame (void)
3465 {
3466 /* We want to emit correct CFA location expressions or lists, so we
3467 have to return true if we're going to output debug info, even if
3468 we're not going to output frame or unwind info. */
3469 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3470 return true;
3471
3472 if (saved_do_cfi_asm > 0)
3473 return true;
3474
3475 if (targetm.debug_unwind_info () == UI_DWARF2)
3476 return true;
3477
3478 if (dwarf2out_do_eh_frame ())
3479 return true;
3480
3481 return false;
3482 }
3483
/* Decide whether to emit frame unwind via assembler directives.
   The answer is computed once and cached in SAVED_DO_CFI_ASM
   (0 = not yet decided, >0 = true, <0 = false).  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* Return the cached answer if we already decided.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  Caching -1 up front means every early
     return below leaves the negative answer recorded.  Note this does not
     perturb the dwarf2out_do_frame call below, which only tests > 0.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  The 0x70 mask
     selects the application bits of the DW_EH_PE encoding; only absolute
     (0) and pc-relative are acceptable.  Check both the personality
     (code 2, global) and LSDA (code 0, local) encodings.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3520
3521 namespace {
3522
3523 const pass_data pass_data_dwarf2_frame =
3524 {
3525 RTL_PASS, /* type */
3526 "dwarf2", /* name */
3527 OPTGROUP_NONE, /* optinfo_flags */
3528 TV_FINAL, /* tv_id */
3529 0, /* properties_required */
3530 0, /* properties_provided */
3531 0, /* properties_destroyed */
3532 0, /* todo_flags_start */
3533 0, /* todo_flags_finish */
3534 };
3535
3536 class pass_dwarf2_frame : public rtl_opt_pass
3537 {
3538 public:
3539 pass_dwarf2_frame (gcc::context *ctxt)
3540 : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
3541 {}
3542
3543 /* opt_pass methods: */
3544 virtual bool gate (function *);
3545 virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }
3546
3547 }; // class pass_dwarf2_frame
3548
3549 bool
3550 pass_dwarf2_frame::gate (function *)
3551 {
3552 /* Targets which still implement the prologue in assembler text
3553 cannot use the generic dwarf2 unwinding. */
3554 if (!targetm.have_prologue ())
3555 return false;
3556
3557 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3558 from the optimized shrink-wrapping annotations that we will compute.
3559 For now, only produce the CFI notes for dwarf2. */
3560 return dwarf2out_do_frame ();
3561 }
3562
3563 } // anon namespace
3564
/* Create an instance of the dwarf2 frame pass for the pass manager.
   The caller takes ownership of the returned pass.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3570
3571 #include "gt-dwarf2cfi.h"