1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23
24 #include "elf/external.h"
25 #include "elf/common.h"
26 #include "elf/mips.h"
27
28 #include "symtab.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdbcore.h"
33 #include "target.h"
34 #include "inferior.h"
35 #include "regcache.h"
36 #include "gdbthread.h"
37 #include "observer.h"
38
39 #include "gdb_assert.h"
40
41 #include "solist.h"
42 #include "solib.h"
43 #include "solib-svr4.h"
44
45 #include "bfd-target.h"
46 #include "elf-bfd.h"
47 #include "exec.h"
48 #include "auxv.h"
49 #include "exceptions.h"
50
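/* Forward declarations.  */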
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54
55 /* Link map info to include in an allocated so_list entry. */
56
57 struct lm_info
58 {
59 /* Amount by which addresses in the binary should be relocated to
60 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
61 When prelinking is involved and the prelink base address changes,
62 we may need a different offset - the recomputed offset is in L_ADDR.
63 It is commonly the same value. It is cached as we want to warn about
64 the difference and compute it only once. L_ADDR is valid
65 iff L_ADDR_P. */
66 CORE_ADDR l_addr, l_addr_inferior;
67 unsigned int l_addr_p : 1;
68
69 /* The target location of lm. */
70 CORE_ADDR lm_addr;
71
72 /* Values read in from inferior's fields of the same name. */
73 CORE_ADDR l_ld, l_next, l_prev, l_name;
74 };
75
76 /* On SVR4 systems, a list of symbols in the dynamic linker where
77 GDB can try to place a breakpoint to monitor shared library
78 events.
79
80 If none of these symbols are found, or other errors occur, then
81 SVR4 systems will fall back to using a symbol as the "startup
82 mapping complete" breakpoint address. */
83
84 static const char * const solib_break_names[] =
85 {
86 "r_debug_state",
87 "_r_debug_state",
88 "_dl_debug_state",
89 "rtld_db_dlactivity",
90 "__dl_rtld_db_dlactivity",
91 "_rtld_debug_state",
92
93 NULL
94 };
95
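/* If the dynamic linker breakpoint cannot be set from the names above, fall
   back to placing a breakpoint at one of these symbols of the main
   executable instead (see enable_break).  */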
96 static const char * const bkpt_names[] =
97 {
98 "_start",
99 "__start",
100 "main",
101 NULL
102 };
103
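/* Names by which the link map entry for the main executable can be
   recognized (see match_main).  */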
104 static const char * const main_name_list[] =
105 {
106 "main_$main",
107 NULL
108 };
109
110 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
111 the same shared library. */
112
113 static int
114 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
115 {
116 if (strcmp (gdb_so_name, inferior_so_name) == 0)
117 return 1;
118
119 /* On Solaris, when starting the inferior we think that the dynamic
120 linker is /usr/lib/ld.so.1, but later on, the table of loaded shared
121 libraries contains /lib/ld.so.1. Sometimes one file is a link to the
122 other, but sometimes they have identical content without being linked
123 to each other. We don't restrict this check to Solaris, but the
124 chances of running into this situation elsewhere are very low. */
125 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
126 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
127 return 1;
128
129 /* Similarly, we observed the same issue with sparc64, but with
130 different locations. */
131 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
132 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
133 return 1;
134
135 return 0;
136 }
137
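/* Compare two so_list entries by their original names; used to decide
   whether GDB's entry and the inferior's entry refer to the same shared
   library.  */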
138 static int
139 svr4_same (struct so_list *gdb, struct so_list *inferior)
140 {
141 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
142 }
143
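/* Read the link map entry at LM_ADDR in the inferior and return a newly
   allocated lm_info describing it, or NULL (after printing a warning) if
   the entry cannot be read.  The caller owns the returned memory.  */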
144 static struct lm_info *
145 lm_info_read (CORE_ADDR lm_addr)
146 {
147 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
148 gdb_byte *lm;
149 struct lm_info *lm_info;
150 struct cleanup *back_to;
151
152 lm = xmalloc (lmo->link_map_size);
153 back_to = make_cleanup (xfree, lm);
154
155 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
156 {
157 warning (_("Error reading shared library list entry at %s"),
158 paddress (target_gdbarch, lm_addr));
159 lm_info = NULL;
160 }
161 else
162 {
163 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
164
165 lm_info = xzalloc (sizeof (*lm_info));
166 lm_info->lm_addr = lm_addr;
167
168 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
169 ptr_type);
170 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
171 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
172 ptr_type);
173 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
174 ptr_type);
175 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
176 ptr_type);
177 }
178
179 do_cleanups (back_to);
180
181 return lm_info;
182 }
183
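/* Return non-zero if the link map format used by this target includes the
   l_ld member, i.e. if its offset is known.  */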
184 static int
185 has_lm_dynamic_from_link_map (void)
186 {
187 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
188
189 return lmo->l_ld_offset >= 0;
190 }
191
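/* Return the relocation offset (l_addr) for shared object SO, computing and
   caching it on first use.  When ABFD is available, the inferior's l_ld
   value is compared against the address of the .dynamic section to detect a
   prelink base address change and to recompute the offset accordingly.  */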
192 static CORE_ADDR
193 lm_addr_check (struct so_list *so, bfd *abfd)
194 {
195 if (!so->lm_info->l_addr_p)
196 {
197 struct bfd_section *dyninfo_sect;
198 CORE_ADDR l_addr, l_dynaddr, dynaddr;
199
200 l_addr = so->lm_info->l_addr_inferior;
201
202 if (! abfd || ! has_lm_dynamic_from_link_map ())
203 goto set_addr;
204
205 l_dynaddr = so->lm_info->l_ld;
206
207 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
208 if (dyninfo_sect == NULL)
209 goto set_addr;
210
211 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
212
213 if (dynaddr + l_addr != l_dynaddr)
214 {
215 CORE_ADDR align = 0x1000;
216 CORE_ADDR minpagesize = align;
217
218 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
219 {
220 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
221 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
222 int i;
223
224 align = 1;
225
226 for (i = 0; i < ehdr->e_phnum; i++)
227 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
228 align = phdr[i].p_align;
229
230 minpagesize = get_elf_backend_data (abfd)->minpagesize;
231 }
232
233 /* Turn it into a mask. */
234 align--;
235
236 /* If the changes match the alignment requirements, we
237 assume we're using a core file that was generated by the
238 same binary, just prelinked with a different base offset.
239 If it doesn't match, we may have a different binary, the
240 same binary with the dynamic table loaded at an unrelated
241 location, or anything, really. To avoid regressions,
242 don't adjust the base offset in the latter case, although
243 odds are that, if things really changed, debugging won't
244 quite work.
245
246 One might instead expect the condition
247 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
248 but the one below is relaxed for PPC. The PPC kernel supports
249 either 4k or 64k page sizes. To be prepared for 64k pages,
250 PPC ELF files are built using an alignment requirement of 64k.
251 However, when running on a kernel supporting 4k pages, the memory
252 mapping of the library may not actually happen on a 64k boundary!
253
254 (In the usual case where (l_addr & align) == 0, this check is
255 equivalent to the possibly expected check above.)
256
257 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
258
259 l_addr = l_dynaddr - dynaddr;
260
261 if ((l_addr & (minpagesize - 1)) == 0
262 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
263 {
264 if (info_verbose)
265 printf_unfiltered (_("Using PIC (Position Independent Code) "
266 "prelink displacement %s for \"%s\".\n"),
267 paddress (target_gdbarch, l_addr),
268 so->so_name);
269 }
270 else
271 {
272 /* There is no way to verify that the library file matches.  During
273 prelinking of an unprelinked file (or unprelinking of a prelinked
274 file), prelink can shift the DYNAMIC segment by an arbitrary
275 offset without any page size alignment.  There is no way to
276 read the ELF header and/or Program Headers for even a limited
277 verification that they match; at best one could verify the
278 DYNAMIC segment itself.  Still, the address found is the best
279 one GDB could find. */
280
281 warning (_(".dynamic section for \"%s\" "
282 "is not at the expected address "
283 "(wrong library or version mismatch?)"), so->so_name);
284 }
285 }
286
287 set_addr:
288 so->lm_info->l_addr = l_addr;
289 so->lm_info->l_addr_p = 1;
290 }
291
292 return so->lm_info->l_addr;
293 }
294
295 /* Per pspace SVR4 specific data. */
296
297 struct svr4_info
298 {
299 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
300
301 /* Validity flag for debug_loader_offset. */
302 int debug_loader_offset_p;
303
304 /* Load address for the dynamic linker, inferred. */
305 CORE_ADDR debug_loader_offset;
306
307 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
308 char *debug_loader_name;
309
310 /* Load map address for the main executable. */
311 CORE_ADDR main_lm_addr;
312
313 CORE_ADDR interp_text_sect_low;
314 CORE_ADDR interp_text_sect_high;
315 CORE_ADDR interp_plt_sect_low;
316 CORE_ADDR interp_plt_sect_high;
317 };
318
319 /* Per-program-space data key. */
320 static const struct program_space_data *solib_svr4_pspace_data;
321
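/* Per-program-space data cleanup: free the svr4_info associated with
   PSPACE.  */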
322 static void
323 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
324 {
325 struct svr4_info *info;
326
327 info = program_space_data (pspace, solib_svr4_pspace_data);
328 xfree (info);
329 }
330
331 /* Get the current svr4 data. If none is found yet, add it now. This
332 function always returns a valid object. */
333
334 static struct svr4_info *
335 get_svr4_info (void)
336 {
337 struct svr4_info *info;
338
339 info = program_space_data (current_program_space, solib_svr4_pspace_data);
340 if (info != NULL)
341 return info;
342
343 info = XZALLOC (struct svr4_info);
344 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
345 return info;
346 }
347
348 /* Local function prototypes */
349
350 static int match_main (const char *);
351
352 /* Read program header TYPE from inferior memory. The header is found
353 by scanning the OS auxiliary vector.
354
355 If TYPE == -1, return the program headers instead of the contents of
356 one program header.
357
358 Return a pointer to allocated memory holding the program header contents,
359 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
360 size of those contents is returned in P_SECT_SIZE. Likewise, the target
361 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE. */
362
363 static gdb_byte *
364 read_program_header (int type, int *p_sect_size, int *p_arch_size)
365 {
366 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
367 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
368 int arch_size, sect_size;
369 CORE_ADDR sect_addr;
370 gdb_byte *buf;
371 int pt_phdr_p = 0;
372
373 /* Get required auxv elements from target. */
374 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
375 return 0;
376 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
377 return 0;
378 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
379 return 0;
380 if (!at_phdr || !at_phnum)
381 return 0;
382
383 /* Determine ELF architecture type. */
384 if (at_phent == sizeof (Elf32_External_Phdr))
385 arch_size = 32;
386 else if (at_phent == sizeof (Elf64_External_Phdr))
387 arch_size = 64;
388 else
389 return 0;
390
391 /* Find the requested segment. */
392 if (type == -1)
393 {
394 sect_addr = at_phdr;
395 sect_size = at_phent * at_phnum;
396 }
397 else if (arch_size == 32)
398 {
399 Elf32_External_Phdr phdr;
400 int i;
401
402 /* Search for requested PHDR. */
403 for (i = 0; i < at_phnum; i++)
404 {
405 int p_type;
406
407 if (target_read_memory (at_phdr + i * sizeof (phdr),
408 (gdb_byte *)&phdr, sizeof (phdr)))
409 return 0;
410
411 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
412 4, byte_order);
413
414 if (p_type == PT_PHDR)
415 {
416 pt_phdr_p = 1;
417 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
418 4, byte_order);
419 }
420
421 if (p_type == type)
422 break;
423 }
424
425 if (i == at_phnum)
426 return 0;
427
428 /* Retrieve address and size. */
429 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
430 4, byte_order);
431 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
432 4, byte_order);
433 }
434 else
435 {
436 Elf64_External_Phdr phdr;
437 int i;
438
439 /* Search for requested PHDR. */
440 for (i = 0; i < at_phnum; i++)
441 {
442 int p_type;
443
444 if (target_read_memory (at_phdr + i * sizeof (phdr),
445 (gdb_byte *)&phdr, sizeof (phdr)))
446 return 0;
447
448 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
449 4, byte_order);
450
451 if (p_type == PT_PHDR)
452 {
453 pt_phdr_p = 1;
454 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
455 8, byte_order);
456 }
457
458 if (p_type == type)
459 break;
460 }
461
462 if (i == at_phnum)
463 return 0;
464
465 /* Retrieve address and size. */
466 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
467 8, byte_order);
468 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
469 8, byte_order);
470 }
471
472 /* PT_PHDR is optional, but we really need it
473 for PIE to make this work in general. */
474
475 if (pt_phdr_p)
476 {
477 /* at_phdr is the real address in memory; pt_phdr is what the program
478 header claims it should be. The relocation offset is the difference between the two. */
479 sect_addr = sect_addr + (at_phdr - pt_phdr);
480 }
481
482 /* Read in requested program header. */
483 buf = xmalloc (sect_size);
484 if (target_read_memory (sect_addr, buf, sect_size))
485 {
486 xfree (buf);
487 return NULL;
488 }
489
490 if (p_arch_size)
491 *p_arch_size = arch_size;
492 if (p_sect_size)
493 *p_sect_size = sect_size;
494
495 return buf;
496 }
497
498
499 /* Return program interpreter string. */
500 static gdb_byte *
501 find_program_interpreter (void)
502 {
503 gdb_byte *buf = NULL;
504
505 /* If we have an exec_bfd, use its section table. */
506 if (exec_bfd
507 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
508 {
509 struct bfd_section *interp_sect;
510
511 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
512 if (interp_sect != NULL)
513 {
514 int sect_size = bfd_section_size (exec_bfd, interp_sect);
515
516 buf = xmalloc (sect_size);
517 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
518 }
519 }
520
521 /* If we didn't find it, use the target auxiliary vector. */
522 if (!buf)
523 buf = read_program_header (PT_INTERP, NULL, NULL);
524
525 return buf;
526 }
527
528
529 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
530 returned and the corresponding PTR is set. */
531
532 static int
533 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
534 {
535 int arch_size, step, sect_size;
536 long dyn_tag;
537 CORE_ADDR dyn_ptr, dyn_addr;
538 gdb_byte *bufend, *bufstart, *buf;
539 Elf32_External_Dyn *x_dynp_32;
540 Elf64_External_Dyn *x_dynp_64;
541 struct bfd_section *sect;
542 struct target_section *target_section;
543
544 if (abfd == NULL)
545 return 0;
546
547 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
548 return 0;
549
550 arch_size = bfd_get_arch_size (abfd);
551 if (arch_size == -1)
552 return 0;
553
554 /* Find the start address of the .dynamic section. */
555 sect = bfd_get_section_by_name (abfd, ".dynamic");
556 if (sect == NULL)
557 return 0;
558
559 for (target_section = current_target_sections->sections;
560 target_section < current_target_sections->sections_end;
561 target_section++)
562 if (sect == target_section->the_bfd_section)
563 break;
564 if (target_section < current_target_sections->sections_end)
565 dyn_addr = target_section->addr;
566 else
567 {
568 /* ABFD may come from OBJFILE acting only as a symbol file without being
569 loaded into the target (see add_symbol_file_command). In that case, fall
570 back to the file VMA address, without the possibility of having the
571 section relocated to its actual in-memory address. */
572
573 dyn_addr = bfd_section_vma (abfd, sect);
574 }
575
576 /* Read in .dynamic from the BFD. We will get the actual value
577 from memory later. */
578 sect_size = bfd_section_size (abfd, sect);
579 buf = bufstart = alloca (sect_size);
580 if (!bfd_get_section_contents (abfd, sect,
581 buf, 0, sect_size))
582 return 0;
583
584 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
585 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
586 : sizeof (Elf64_External_Dyn);
587 for (bufend = buf + sect_size;
588 buf < bufend;
589 buf += step)
590 {
591 if (arch_size == 32)
592 {
593 x_dynp_32 = (Elf32_External_Dyn *) buf;
594 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
595 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
596 }
597 else
598 {
599 x_dynp_64 = (Elf64_External_Dyn *) buf;
600 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
601 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
602 }
603 if (dyn_tag == DT_NULL)
604 return 0;
605 if (dyn_tag == dyntag)
606 {
607 /* If requested, try to read the runtime value of this .dynamic
608 entry. */
609 if (ptr)
610 {
611 struct type *ptr_type;
612 gdb_byte ptr_buf[8];
613 CORE_ADDR ptr_addr;
614
615 ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
616 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
617 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
618 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
619 *ptr = dyn_ptr;
620 }
621 return 1;
622 }
623 }
624
625 return 0;
626 }
627
628 /* Scan for DYNTAG in .dynamic section of the target's main executable,
629 found by consulting the OS auxiliary vector. If DYNTAG is found 1 is
630 returned and the corresponding PTR is set. */
631
632 static int
633 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
634 {
635 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
636 int sect_size, arch_size, step;
637 long dyn_tag;
638 CORE_ADDR dyn_ptr;
639 gdb_byte *bufend, *bufstart, *buf;
640
641 /* Read in .dynamic section. */
642 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
643 if (!buf)
644 return 0;
645
646 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
647 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
648 : sizeof (Elf64_External_Dyn);
649 for (bufend = buf + sect_size;
650 buf < bufend;
651 buf += step)
652 {
653 if (arch_size == 32)
654 {
655 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
656
657 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
658 4, byte_order);
659 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
660 4, byte_order);
661 }
662 else
663 {
664 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
665
666 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
667 8, byte_order);
668 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
669 8, byte_order);
670 }
671 if (dyn_tag == DT_NULL)
672 break;
673
674 if (dyn_tag == dyntag)
675 {
676 if (ptr)
677 *ptr = dyn_ptr;
678
679 xfree (bufstart);
680 return 1;
681 }
682 }
683
684 xfree (bufstart);
685 return 0;
686 }
687
688 /* Locate the base address of dynamic linker structs for SVR4 elf
689 targets.
690
691 For SVR4 elf targets the address of the dynamic linker's runtime
692 structure is contained within the dynamic info section in the
693 executable file. The dynamic section is also mapped into the
694 inferior address space. Because the runtime loader fills in the
695 real address before starting the inferior, we have to read in the
696 dynamic info section from the inferior address space.
697 If there are any errors while trying to find the address, we
698 silently return 0, otherwise the found address is returned. */
699
700 static CORE_ADDR
701 elf_locate_base (void)
702 {
703 struct minimal_symbol *msymbol;
704 CORE_ADDR dyn_ptr;
705
706 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
707 instead of DT_DEBUG, although they sometimes contain an unused
708 DT_DEBUG. */
709 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
710 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
711 {
712 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
713 gdb_byte *pbuf;
714 int pbuf_size = TYPE_LENGTH (ptr_type);
715
716 pbuf = alloca (pbuf_size);
717 /* DT_MIPS_RLD_MAP contains a pointer to the address
718 of the dynamic link structure. */
719 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
720 return 0;
721 return extract_typed_address (pbuf, ptr_type);
722 }
723
724 /* Find DT_DEBUG. */
725 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
726 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
727 return dyn_ptr;
728
729 /* This may be a static executable. Look for the symbol
730 conventionally named _r_debug, as a last resort. */
731 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
732 if (msymbol != NULL)
733 return SYMBOL_VALUE_ADDRESS (msymbol);
734
735 /* DT_DEBUG entry not found. */
736 return 0;
737 }
738
739 /* Locate the base address of dynamic linker structs.
740
741 For both the SunOS and SVR4 shared library implementations, if the
742 inferior executable has been linked dynamically, there is a single
743 address somewhere in the inferior's data space which is the key to
744 locating all of the dynamic linker's runtime structures. This
745 address is the value of the debug base symbol. The job of this
746 function is to find and return that address, or to return 0 if there
747 is no such address (the executable is statically linked for example).
748
749 For SunOS, the job is almost trivial, since the dynamic linker and
750 all of its structures are statically linked to the executable at
751 link time. Thus the symbol for the address we are looking for has
752 already been added to the minimal symbol table for the executable's
753 objfile at the time the symbol file's symbols were read, and all we
754 have to do is look it up there. Note that we explicitly do NOT want
755 to find the copies in the shared library.
756
757 The SVR4 version is a bit more complicated because the address
758 is contained somewhere in the dynamic info section. We have to go
759 to a lot more work to discover the address of the debug base symbol.
760 Because of this complexity, we cache the value we find and return that
761 value on subsequent invocations. Note there is no copy in the
762 executable symbol tables. */
763
764 static CORE_ADDR
765 locate_base (struct svr4_info *info)
766 {
767 /* Check to see if we have a currently valid address, and if so, avoid
768 doing all this work again and just return the cached address. If
769 we have no cached address, try to locate it in the dynamic info
770 section for ELF executables. There's no point in doing any of this
771 though if we don't have some link map offsets to work with. */
772
773 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
774 info->debug_base = elf_locate_base ();
775 return info->debug_base;
776 }
777
778 /* Find the first element in the inferior's dynamic link map, and
779 return its address in the inferior. Return zero if the address
780 could not be determined.
781
782 FIXME: Perhaps we should validate the info somehow, perhaps by
783 checking r_version for a known version number, or r_state for
784 RT_CONSISTENT. */
785
786 static CORE_ADDR
787 solib_svr4_r_map (struct svr4_info *info)
788 {
789 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
790 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
791 CORE_ADDR addr = 0;
792 volatile struct gdb_exception ex;
793
794 TRY_CATCH (ex, RETURN_MASK_ERROR)
795 {
796 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
797 ptr_type);
798 }
799 exception_print (gdb_stderr, ex);
800 return addr;
801 }
802
803 /* Find r_brk from the inferior's debug base. */
804
805 static CORE_ADDR
806 solib_svr4_r_brk (struct svr4_info *info)
807 {
808 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
809 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
810
811 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
812 ptr_type);
813 }
814
815 /* Find the link map for the dynamic linker (if it is not in the
816 normal list of loaded shared objects). */
817
818 static CORE_ADDR
819 solib_svr4_r_ldsomap (struct svr4_info *info)
820 {
821 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
822 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
823 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
824 ULONGEST version;
825
826 /* Check version, and return zero if `struct r_debug' doesn't have
827 the r_ldsomap member. */
828 version
829 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
830 lmo->r_version_size, byte_order);
831 if (version < 2 || lmo->r_ldsomap_offset == -1)
832 return 0;
833
834 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
835 ptr_type);
836 }
837
838 /* On Solaris systems with some versions of the dynamic linker,
839 ld.so's l_name pointer points to the SONAME in the string table
840 rather than into writable memory. So that GDB can find shared
841 libraries when loading a core file generated by gcore, ensure that
842 memory areas containing the l_name string are saved in the core
843 file. */
844
845 static int
846 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
847 {
848 struct svr4_info *info;
849 CORE_ADDR ldsomap;
850 struct so_list *new;
851 struct cleanup *old_chain;
852 struct link_map_offsets *lmo;
853 CORE_ADDR name_lm;
854
855 info = get_svr4_info ();
856
857 info->debug_base = 0;
858 locate_base (info);
859 if (!info->debug_base)
860 return 0;
861
862 ldsomap = solib_svr4_r_ldsomap (info);
863 if (!ldsomap)
864 return 0;
865
866 lmo = svr4_fetch_link_map_offsets ();
867 new = XZALLOC (struct so_list);
868 old_chain = make_cleanup (xfree, new);
869 new->lm_info = lm_info_read (ldsomap);
870 make_cleanup (xfree, new->lm_info);
871 name_lm = new->lm_info ? new->lm_info->l_name : 0;
872 do_cleanups (old_chain);
873
874 return (name_lm >= vaddr && name_lm < vaddr + size);
875 }
876
877 /* Implement the "open_symbol_file_object" target_so_ops method.
878
879 If no open symbol file, attempt to locate and open the main symbol
880 file. On SVR4 systems, this is the first link map entry. If its
881 name is here, we can open it. Useful when attaching to a process
882 without first loading its symbol file. */
883
884 static int
885 open_symbol_file_object (void *from_ttyp)
886 {
887 CORE_ADDR lm, l_name;
888 char *filename;
889 int errcode;
890 int from_tty = *(int *)from_ttyp;
891 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
892 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
893 int l_name_size = TYPE_LENGTH (ptr_type);
894 gdb_byte *l_name_buf = xmalloc (l_name_size);
895 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
896 struct svr4_info *info = get_svr4_info ();
897
898 if (symfile_objfile)
899 if (!query (_("Attempt to reload symbols from process? ")))
900 {
901 do_cleanups (cleanups);
902 return 0;
903 }
904
905 /* Always locate the debug struct, in case it has moved. */
906 info->debug_base = 0;
907 if (locate_base (info) == 0)
908 {
909 do_cleanups (cleanups);
910 return 0; /* failed somehow... */
911 }
912
913 /* First link map member should be the executable. */
914 lm = solib_svr4_r_map (info);
915 if (lm == 0)
916 {
917 do_cleanups (cleanups);
918 return 0; /* failed somehow... */
919 }
920
921 /* Read address of name from target memory to GDB. */
922 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
923
924 /* Convert the address to host format. */
925 l_name = extract_typed_address (l_name_buf, ptr_type);
926
927 if (l_name == 0)
928 {
929 do_cleanups (cleanups);
930 return 0; /* No filename. */
931 }
932
933 /* Now fetch the filename from target memory. */
934 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
935 make_cleanup (xfree, filename);
936
937 if (errcode)
938 {
939 warning (_("failed to read exec filename from attached file: %s"),
940 safe_strerror (errcode));
941 do_cleanups (cleanups);
942 return 0;
943 }
944
945 /* Have a pathname: read the symbol file. */
946 symbol_file_add_main (filename, from_tty);
947
948 do_cleanups (cleanups);
949 return 1;
950 }
951
952 /* Data exchange structure for the XML parser as returned by
953 svr4_current_sos_via_xfer_libraries. */
954
955 struct svr4_library_list
956 {
957 struct so_list *head, **tailp;
958
959 /* Inferior address of struct link_map used for the main executable. It is
960 NULL if not known. */
961 CORE_ADDR main_lm;
962 };
963
964 #ifdef HAVE_LIBEXPAT
965
966 #include "xml-support.h"
967
968 /* Handle the start of a <library> element. Note: new elements are added
969 at the tail of the list, keeping the list in order. */
970
971 static void
972 library_list_start_library (struct gdb_xml_parser *parser,
973 const struct gdb_xml_element *element,
974 void *user_data, VEC(gdb_xml_value_s) *attributes)
975 {
976 struct svr4_library_list *list = user_data;
977 const char *name = xml_find_attribute (attributes, "name")->value;
978 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
979 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
980 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
981 struct so_list *new_elem;
982
983 new_elem = XZALLOC (struct so_list);
984 new_elem->lm_info = XZALLOC (struct lm_info);
985 new_elem->lm_info->lm_addr = *lmp;
986 new_elem->lm_info->l_addr_inferior = *l_addrp;
987 new_elem->lm_info->l_ld = *l_ldp;
988
989 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
990 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
991 strcpy (new_elem->so_original_name, new_elem->so_name);
992
993 *list->tailp = new_elem;
994 list->tailp = &new_elem->next;
995 }
996
997 /* Handle the start of a <library-list-svr4> element. */
998
999 static void
1000 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1001 const struct gdb_xml_element *element,
1002 void *user_data, VEC(gdb_xml_value_s) *attributes)
1003 {
1004 struct svr4_library_list *list = user_data;
1005 const char *version = xml_find_attribute (attributes, "version")->value;
1006 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1007
1008 if (strcmp (version, "1.0") != 0)
1009 gdb_xml_error (parser,
1010 _("SVR4 Library list has unsupported version \"%s\""),
1011 version);
1012
1013 if (main_lm)
1014 list->main_lm = *(ULONGEST *) main_lm->value;
1015 }
1016
1017 /* The allowed elements and attributes for an XML library list.
1018 The root element is a <library-list-svr4>. */
1019
1020 static const struct gdb_xml_attribute svr4_library_attributes[] =
1021 {
1022 { "name", GDB_XML_AF_NONE, NULL, NULL },
1023 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1024 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1025 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1026 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1027 };
1028
1029 static const struct gdb_xml_element svr4_library_list_children[] =
1030 {
1031 {
1032 "library", svr4_library_attributes, NULL,
1033 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1034 library_list_start_library, NULL
1035 },
1036 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1037 };
1038
1039 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1040 {
1041 { "version", GDB_XML_AF_NONE, NULL, NULL },
1042 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1043 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1044 };
1045
1046 static const struct gdb_xml_element svr4_library_list_elements[] =
1047 {
1048 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1049 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1050 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1051 };
1052
1053 /* Implementation for target_so_ops.free_so. */
1054
1055 static void
1056 svr4_free_so (struct so_list *so)
1057 {
1058 xfree (so->lm_info);
1059 }
1060
1061 /* Free so_list built so far (called via cleanup). */
1062
1063 static void
1064 svr4_free_library_list (void *p_list)
1065 {
1066 struct so_list *list = *(struct so_list **) p_list;
1067
1068 while (list != NULL)
1069 {
1070 struct so_list *next = list->next;
1071
1072 svr4_free_so (list);
1073 list = next;
1074 }
1075 }
1076
1077 /* Parse qXfer:libraries:read packet DOCUMENT into *LIST.
1078
1079 Return 0 if the packet is not supported; *LIST is not modified in that
1080 case. Return 1 if *LIST contains the library list; it may be empty, and
1081 the caller is responsible for freeing all its entries. */
1082
1083 static int
1084 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1085 {
1086 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1087 &list->head);
1088
1089 memset (list, 0, sizeof (*list));
1090 list->tailp = &list->head;
1091 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1092 svr4_library_list_elements, document, list) == 0)
1093 {
1094 /* Parsed successfully, keep the result. */
1095 discard_cleanups (back_to);
1096 return 1;
1097 }
1098
1099 do_cleanups (back_to);
1100 return 0;
1101 }
1102
1103 /* Attempt to get so_list from target via qXfer:libraries:read packet.
1104
1105 Return 0 if the packet is not supported; *LIST is not modified in that
1106 case. Return 1 if *LIST contains the library list; it may be empty, and
1107 the caller is responsible for freeing all its entries. */
1108
1109 static int
1110 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1111 {
1112 char *svr4_library_document;
1113 int result;
1114 struct cleanup *back_to;
1115
1116 /* Fetch the list of shared libraries. */
1117 svr4_library_document = target_read_stralloc (&current_target,
1118 TARGET_OBJECT_LIBRARIES_SVR4,
1119 NULL);
1120 if (svr4_library_document == NULL)
1121 return 0;
1122
1123 back_to = make_cleanup (xfree, svr4_library_document);
1124 result = svr4_parse_libraries (svr4_library_document, list);
1125 do_cleanups (back_to);
1126
1127 return result;
1128 }
1129
1130 #else
1131
1132 static int
1133 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1134 {
1135 return 0;
1136 }
1137
1138 #endif
1139
1140 /* If no shared library information is available from the dynamic
1141 linker, build a fallback list from other sources. */
1142
1143 static struct so_list *
1144 svr4_default_sos (void)
1145 {
1146 struct svr4_info *info = get_svr4_info ();
1147 struct so_list *new;
1148
1149 if (!info->debug_loader_offset_p)
1150 return NULL;
1151
1152 new = XZALLOC (struct so_list);
1153
1154 new->lm_info = xzalloc (sizeof (struct lm_info));
1155
1156 /* Nothing will ever check the other fields if we set l_addr_p. */
1157 new->lm_info->l_addr = info->debug_loader_offset;
1158 new->lm_info->l_addr_p = 1;
1159
1160 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1161 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1162 strcpy (new->so_original_name, new->so_name);
1163
1164 return new;
1165 }
1166
1167 /* Read the whole inferior libraries chain starting at address LM. Add the
1168 entries to the tail referenced by LINK_PTR_PTR. Ignore the first entry if
1169 IGNORE_FIRST and set global MAIN_LM_ADDR according to it. */
1170
1171 static void
1172 svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
1173 int ignore_first)
1174 {
1175 CORE_ADDR prev_lm = 0, next_lm;
1176
1177 for (; lm != 0; prev_lm = lm, lm = next_lm)
1178 {
1179 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1180 struct so_list *new;
1181 struct cleanup *old_chain;
1182 int errcode;
1183 char *buffer;
1184
1185 new = XZALLOC (struct so_list);
1186 old_chain = make_cleanup_free_so (new);
1187
1188 new->lm_info = lm_info_read (lm);
1189 if (new->lm_info == NULL)
1190 {
1191 do_cleanups (old_chain);
1192 break;
1193 }
1194
1195 next_lm = new->lm_info->l_next;
1196
1197 if (new->lm_info->l_prev != prev_lm)
1198 {
1199 warning (_("Corrupted shared library list: %s != %s"),
1200 paddress (target_gdbarch, prev_lm),
1201 paddress (target_gdbarch, new->lm_info->l_prev));
1202 do_cleanups (old_chain);
1203 break;
1204 }
1205
1206 /* For SVR4 versions, the first entry in the link map is for the
1207 inferior executable, so we must ignore it. For some versions of
1208 SVR4, it has no name. For others (Solaris 2.3 for example), it
1209 does have a name, so we can no longer use a missing name to
1210 decide when to ignore it. */
1211 if (ignore_first && new->lm_info->l_prev == 0)
1212 {
1213 struct svr4_info *info = get_svr4_info ();
1214
1215 info->main_lm_addr = new->lm_info->lm_addr;
1216 do_cleanups (old_chain);
1217 continue;
1218 }
1219
1220 /* Extract this shared object's name. */
1221 target_read_string (new->lm_info->l_name, &buffer,
1222 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1223 if (errcode != 0)
1224 {
1225 warning (_("Can't read pathname for load map: %s."),
1226 safe_strerror (errcode));
1227 do_cleanups (old_chain);
1228 continue;
1229 }
1230
1231 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1232 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1233 strcpy (new->so_original_name, new->so_name);
1234 xfree (buffer);
1235
1236 /* If this entry has no name, or its name matches the name
1237 for the main executable, don't include it in the list. */
1238 if (! new->so_name[0] || match_main (new->so_name))
1239 {
1240 do_cleanups (old_chain);
1241 continue;
1242 }
1243
1244 discard_cleanups (old_chain);
1245 new->next = 0;
1246 **link_ptr_ptr = new;
1247 *link_ptr_ptr = &new->next;
1248 }
1249 }
1250
1251 /* Implement the "current_sos" target_so_ops method. */
1252
1253 static struct so_list *
1254 svr4_current_sos (void)
1255 {
1256 CORE_ADDR lm;
1257 struct so_list *head = NULL;
1258 struct so_list **link_ptr = &head;
1259 struct svr4_info *info;
1260 struct cleanup *back_to;
1261 int ignore_first;
1262 struct svr4_library_list library_list;
1263
1264 if (svr4_current_sos_via_xfer_libraries (&library_list))
1265 {
1266 if (library_list.main_lm)
1267 {
1268 info = get_svr4_info ();
1269 info->main_lm_addr = library_list.main_lm;
1270 }
1271
1272 return library_list.head ? library_list.head : svr4_default_sos ();
1273 }
1274
1275 info = get_svr4_info ();
1276
1277 /* Always locate the debug struct, in case it has moved. */
1278 info->debug_base = 0;
1279 locate_base (info);
1280
1281 /* If we can't find the dynamic linker's base structure, this
1282 must not be a dynamically linked executable. Hmm. */
1283 if (! info->debug_base)
1284 return svr4_default_sos ();
1285
1286 /* Assume that everything is a library if the dynamic loader was loaded
1287 late by a static executable. */
1288 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1289 ignore_first = 0;
1290 else
1291 ignore_first = 1;
1292
1293 back_to = make_cleanup (svr4_free_library_list, &head);
1294
1295 /* Walk the inferior's link map list, and build our list of
1296 `struct so_list' nodes. */
1297 lm = solib_svr4_r_map (info);
1298 if (lm)
1299 svr4_read_so_list (lm, &link_ptr, ignore_first);
1300
1301 /* On Solaris, the dynamic linker is not in the normal list of
1302 shared objects, so make sure we pick it up too. Having
1303 symbol information for the dynamic linker is quite crucial
1304 for skipping dynamic linker resolver code. */
1305 lm = solib_svr4_r_ldsomap (info);
1306 if (lm)
1307 svr4_read_so_list (lm, &link_ptr, 0);
1308
1309 discard_cleanups (back_to);
1310
1311 if (head == NULL)
1312 return svr4_default_sos ();
1313
1314 return head;
1315 }
1316
1317 /* Get the address of the link_map for a given OBJFILE. */
1318
1319 CORE_ADDR
1320 svr4_fetch_objfile_link_map (struct objfile *objfile)
1321 {
1322 struct so_list *so;
1323 struct svr4_info *info = get_svr4_info ();
1324
1325 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1326 if (info->main_lm_addr == 0)
1327 solib_add (NULL, 0, &current_target, auto_solib_add);
1328
1329 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1330 if (objfile == symfile_objfile)
1331 return info->main_lm_addr;
1332
1333 /* The other link map addresses may be found by examining the list
1334 of shared libraries. */
1335 for (so = master_so_list (); so; so = so->next)
1336 if (so->objfile == objfile)
1337 return so->lm_info->lm_addr;
1338
1339 /* Not found! */
1340 return 0;
1341 }
1342
1343 /* On some systems, the only way to recognize the link map entry for
1344 the main executable file is by looking at its name. Return
1345 non-zero iff SONAME matches one of the known main executable names. */
1346
1347 static int
1348 match_main (const char *soname)
1349 {
1350 const char * const *mainp;
1351
1352 for (mainp = main_name_list; *mainp != NULL; mainp++)
1353 {
1354 if (strcmp (soname, *mainp) == 0)
1355 return (1);
1356 }
1357
1358 return (0);
1359 }
1360
1361 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1362 SVR4 run time loader. */
1363
1364 int
1365 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1366 {
1367 struct svr4_info *info = get_svr4_info ();
1368
1369 return ((pc >= info->interp_text_sect_low
1370 && pc < info->interp_text_sect_high)
1371 || (pc >= info->interp_plt_sect_low
1372 && pc < info->interp_plt_sect_high)
1373 || in_plt_section (pc, NULL)
1374 || in_gnu_ifunc_stub (pc));
1375 }
1376
1377 /* Given an executable's ABFD and target, compute the entry-point
1378 address. */
1379
1380 static CORE_ADDR
1381 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1382 {
1383 /* KevinB wrote ... for most targets, the address returned by
1384 bfd_get_start_address() is the entry point for the start
1385 function. But, for some targets, bfd_get_start_address() returns
1386 the address of a function descriptor from which the entry point
1387 address may be extracted. This address is extracted by
1388 gdbarch_convert_from_func_ptr_addr(). The method
1389 gdbarch_convert_from_func_ptr_addr() is merely the identity
1390 function for targets which don't use function descriptors. */
1391 return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1392 bfd_get_start_address (abfd),
1393 targ);
1394 }
1395
1396 /* Helper function for gdb_bfd_lookup_symbol. */
1397
1398 static int
1399 cmp_name_and_sec_flags (asymbol *sym, void *data)
1400 {
1401 return (strcmp (sym->name, (const char *) data) == 0
1402 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1403 }
1404 /* Arrange for dynamic linker to hit breakpoint.
1405
1406 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1407 debugger interface, support for arranging for the inferior to hit
1408 a breakpoint after mapping in the shared libraries. This function
1409 enables that breakpoint.
1410
1411 For SunOS, there is a special flag location (in_debugger) which we
1412 set to 1. When the dynamic linker sees this flag set, it will set
1413 a breakpoint at a location known only to itself, after saving the
1414 original contents of that place and the breakpoint address itself,
1415 in its own internal structures. When we resume the inferior, it
1416 will eventually take a SIGTRAP when it runs into the breakpoint.
1417 We handle this (in a different place) by restoring the contents of
1418 the breakpointed location (which is only known after it stops),
1419 chasing around to locate the shared libraries that have been
1420 loaded, then resuming.
1421
1422 For SVR4, the debugger interface structure contains a member (r_brk)
1423 which is statically initialized at the time the shared library is
1424 built, to the offset of a function (_r_debug_state) which is
1425 guaranteed to be called once before mapping in a library, and again when
1426 the mapping is complete. At the time we are examining this member,
1427 it contains only the unrelocated offset of the function, so we have
1428 to do our own relocation. Later, when the dynamic linker actually
1429 runs, it relocates r_brk to be the actual address of _r_debug_state().
1430
1431 The debugger interface structure also contains an enumeration which
1432 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1433 depending upon whether or not the library is being mapped or unmapped,
1434 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1435
1436 static int
1437 enable_break (struct svr4_info *info, int from_tty)
1438 {
1439 struct minimal_symbol *msymbol;
1440 const char * const *bkpt_namep;
1441 asection *interp_sect;
1442 gdb_byte *interp_name;
1443 CORE_ADDR sym_addr;
1444
1445 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1446 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1447
1448 /* If we already have a shared library list in the target, and
1449 r_debug contains r_brk, set the breakpoint there - this should
1450 mean r_brk has already been relocated. Assume the dynamic linker
1451 is the object containing r_brk. */
1452
1453 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1454 sym_addr = 0;
1455 if (info->debug_base && solib_svr4_r_map (info) != 0)
1456 sym_addr = solib_svr4_r_brk (info);
1457
1458 if (sym_addr != 0)
1459 {
1460 struct obj_section *os;
1461
1462 sym_addr = gdbarch_addr_bits_remove
1463 (target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1464 sym_addr,
1465 &current_target));
1466
1467 /* On at least some versions of Solaris there's a dynamic relocation
1468 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1469 we get control before the dynamic linker has self-relocated.
1470 Check if SYM_ADDR is in a known section, if it is assume we can
1471 trust its value. This is just a heuristic though, it could go away
1472 or be replaced if it's getting in the way.
1473
1474 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1475 however it's spelled in your particular system) is ARM or Thumb.
1476 That knowledge is encoded in the address, if it's Thumb the low bit
1477 is 1. However, we've stripped that info above and it's not clear
1478 what all the consequences are of passing a non-addr_bits_remove'd
1479 address to create_solib_event_breakpoint. The call to
1480 find_pc_section verifies we know about the address and have some
1481 hope of computing the right kind of breakpoint to use (via
1482 symbol info). It does mean that GDB needs to be pointed at a
1483 non-stripped version of the dynamic linker in order to obtain
1484 information it already knows about. Sigh. */
1485
1486 os = find_pc_section (sym_addr);
1487 if (os != NULL)
1488 {
1489 /* Record the relocated start and end address of the dynamic linker
1490 text and plt section for svr4_in_dynsym_resolve_code. */
1491 bfd *tmp_bfd;
1492 CORE_ADDR load_addr;
1493
1494 tmp_bfd = os->objfile->obfd;
1495 load_addr = ANOFFSET (os->objfile->section_offsets,
1496 os->objfile->sect_index_text);
1497
1498 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1499 if (interp_sect)
1500 {
1501 info->interp_text_sect_low =
1502 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1503 info->interp_text_sect_high =
1504 info->interp_text_sect_low
1505 + bfd_section_size (tmp_bfd, interp_sect);
1506 }
1507 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1508 if (interp_sect)
1509 {
1510 info->interp_plt_sect_low =
1511 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1512 info->interp_plt_sect_high =
1513 info->interp_plt_sect_low
1514 + bfd_section_size (tmp_bfd, interp_sect);
1515 }
1516
1517 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1518 return 1;
1519 }
1520 }
1521
1522 /* Find the program interpreter; if not found, warn the user and drop
1523 into the old breakpoint at symbol code. */
1524 interp_name = find_program_interpreter ();
1525 if (interp_name)
1526 {
1527 CORE_ADDR load_addr = 0;
1528 int load_addr_found = 0;
1529 int loader_found_in_list = 0;
1530 struct so_list *so;
1531 bfd *tmp_bfd = NULL;
1532 struct target_ops *tmp_bfd_target;
1533 volatile struct gdb_exception ex;
1534
1535 sym_addr = 0;
1536
1537 /* Now we need to figure out where the dynamic linker was
1538 loaded so that we can load its symbols and place a breakpoint
1539 in the dynamic linker itself.
1540
1541 This address is stored on the stack. However, I've been unable
1542 to find any magic formula to find it for Solaris (appears to
1543 be trivial on GNU/Linux). Therefore, we have to try an alternate
1544 mechanism to find the dynamic linker's base address. */
1545
1546 TRY_CATCH (ex, RETURN_MASK_ALL)
1547 {
1548 tmp_bfd = solib_bfd_open (interp_name);
1549 }
1550 if (tmp_bfd == NULL)
1551 goto bkpt_at_symbol;
1552
1553 /* Now convert the TMP_BFD into a target. That way target, as
1554 well as BFD operations can be used. Note that closing the
1555 target will also close the underlying bfd. */
1556 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1557
1558 /* On a running target, we can get the dynamic linker's base
1559 address from the shared library table. */
1560 so = master_so_list ();
1561 while (so)
1562 {
1563 if (svr4_same_1 (interp_name, so->so_original_name))
1564 {
1565 load_addr_found = 1;
1566 loader_found_in_list = 1;
1567 load_addr = lm_addr_check (so, tmp_bfd);
1568 break;
1569 }
1570 so = so->next;
1571 }
1572
1573 /* If we were not able to find the base address of the loader
1574 from our so_list, then try using the AT_BASE auxiliary entry.
1575 if (!load_addr_found)
1576 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1577 {
1578 int addr_bit = gdbarch_addr_bit (target_gdbarch);
1579
1580 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1581 that `+ load_addr' will overflow CORE_ADDR width not creating
1582 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1583 GDB. */
1584
1585 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1586 {
1587 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1588 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1589 tmp_bfd_target);
1590
1591 gdb_assert (load_addr < space_size);
1592
1593 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1594 64bit ld.so with 32bit executable, it should not happen. */
1595
1596 if (tmp_entry_point < space_size
1597 && tmp_entry_point + load_addr >= space_size)
1598 load_addr -= space_size;
1599 }
1600
1601 load_addr_found = 1;
1602 }
1603
1604 /* Otherwise we find the dynamic linker's base address by examining
1605 the current pc (which should point at the entry point for the
1606 dynamic linker) and subtracting the offset of the entry point.
1607
1608 This is more fragile than the previous approaches, but is a good
1609 fallback method because it has actually been working well in
1610 most cases. */
1611 if (!load_addr_found)
1612 {
1613 struct regcache *regcache
1614 = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1615
1616 load_addr = (regcache_read_pc (regcache)
1617 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1618 }
1619
1620 if (!loader_found_in_list)
1621 {
1622 info->debug_loader_name = xstrdup (interp_name);
1623 info->debug_loader_offset_p = 1;
1624 info->debug_loader_offset = load_addr;
1625 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1626 }
1627
1628 /* Record the relocated start and end address of the dynamic linker
1629 text and plt section for svr4_in_dynsym_resolve_code. */
1630 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1631 if (interp_sect)
1632 {
1633 info->interp_text_sect_low =
1634 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1635 info->interp_text_sect_high =
1636 info->interp_text_sect_low
1637 + bfd_section_size (tmp_bfd, interp_sect);
1638 }
1639 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1640 if (interp_sect)
1641 {
1642 info->interp_plt_sect_low =
1643 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1644 info->interp_plt_sect_high =
1645 info->interp_plt_sect_low
1646 + bfd_section_size (tmp_bfd, interp_sect);
1647 }
1648
1649 /* Now try to set a breakpoint in the dynamic linker. */
1650 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1651 {
1652 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1653 (void *) *bkpt_namep);
1654 if (sym_addr != 0)
1655 break;
1656 }
1657
1658 if (sym_addr != 0)
1659 /* Convert 'sym_addr' from a function pointer to an address.
1660 Because we pass tmp_bfd_target instead of the current
1661 target, this will always produce an unrelocated value. */
1662 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1663 sym_addr,
1664 tmp_bfd_target);
1665
1666 /* We're done with both the temporary bfd and target. Remember,
1667 closing the target closes the underlying bfd. */
1668 target_close (tmp_bfd_target, 0);
1669
1670 if (sym_addr != 0)
1671 {
1672 create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1673 xfree (interp_name);
1674 return 1;
1675 }
1676
1677 /* For whatever reason we couldn't set a breakpoint in the dynamic
1678 linker. Warn and drop into the old code. */
1679 bkpt_at_symbol:
1680 xfree (interp_name);
1681 warning (_("Unable to find dynamic linker breakpoint function.\n"
1682 "GDB will be unable to debug shared library initializers\n"
1683 "and track explicitly loaded dynamic code."));
1684 }
1685
1686 /* Scan through the lists of symbols, trying to look up the symbol and
1687 set a breakpoint there. Terminate loop when we/if we succeed. */
1688
1689 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1690 {
1691 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1692 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1693 {
1694 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1695 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1696 sym_addr,
1697 &current_target);
1698 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1699 return 1;
1700 }
1701 }
1702
1703 if (!current_inferior ()->attach_flag)
1704 {
1705 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1706 {
1707 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1708 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1709 {
1710 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1711 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1712 sym_addr,
1713 &current_target);
1714 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1715 return 1;
1716 }
1717 }
1718 }
1719 return 0;
1720 }
1721
1722 /* Implement the "special_symbol_handling" target_so_ops method. */
1723
1724 static void
1725 svr4_special_symbol_handling (void)
1726 {
1727 /* Nothing to do. */
1728 }
1729
1730 /* Read the ELF program headers from ABFD. Return the contents and
1731 set *PHDRS_SIZE to the size of the program headers. */
1732
1733 static gdb_byte *
1734 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1735 {
1736 Elf_Internal_Ehdr *ehdr;
1737 gdb_byte *buf;
1738
1739 ehdr = elf_elfheader (abfd);
1740
1741 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1742 if (*phdrs_size == 0)
1743 return NULL;
1744
1745 buf = xmalloc (*phdrs_size);
1746 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1747 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1748 {
1749 xfree (buf);
1750 return NULL;
1751 }
1752
1753 return buf;
1754 }
1755
1756 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
1757 exec_bfd. Otherwise return 0.
1758
1759 We relocate all of the sections by the same amount. This
1760 behavior is mandated by recent editions of the System V ABI.
1761 According to the System V Application Binary Interface,
1762 Edition 4.1, page 5-5:
1763
1764 ... Though the system chooses virtual addresses for
1765 individual processes, it maintains the segments' relative
1766 positions. Because position-independent code uses relative
1767 addressing between segments, the difference between
1768 virtual addresses in memory must match the difference
1769 between virtual addresses in the file. The difference
1770 between the virtual address of any segment in memory and
1771 the corresponding virtual address in the file is thus a
1772 single constant value for any one executable or shared
1773 object in a given process. This difference is the base
1774 address. One use of the base address is to relocate the
1775 memory image of the program during dynamic linking.
1776
1777 The same language also appears in Edition 4.0 of the System V
1778 ABI and is left unspecified in some of the earlier editions.
1779
1780 Decide if the objfile needs to be relocated.  As indicated above, we will
1781 only be here when execution is stopped.  But during attachment the PC can be
1782 at an arbitrary address, so regcache_read_pc can be misleading (unlike the
1783 auxv AT_ENTRY value).  Moreover, for an executable with an interpreter
1784 section, regcache_read_pc would point into the interpreter, not the main executable.
1785
1786 So, to summarize, relocation is necessary when the start address obtained
1787 from the executable differs from the address in the auxv AT_ENTRY entry.
1788
1789 [ The astute reader will note that we also test to make sure that
1790 the executable in question has the DYNAMIC flag set. It is my
1791 opinion that this test is unnecessary (undesirable even). It
1792 was added to avoid inadvertent relocation of an executable
1793 whose e_type member in the ELF header is not ET_DYN. There may
1794 be a time in the future when it is desirable to do relocations
1795 on other types of files as well in which case this condition
1796 should either be removed or modified to accommodate the new file
1797 type. - Kevin, Nov 2000. ] */
1798
1799 static int
1800 svr4_exec_displacement (CORE_ADDR *displacementp)
1801 {
1802 /* ENTRY_POINT is a possible function descriptor - before
1803 a call to gdbarch_convert_from_func_ptr_addr. */
1804 CORE_ADDR entry_point, displacement;
1805
1806 if (exec_bfd == NULL)
1807 return 0;
1808
1809 /* If the DYNAMIC flag is not set, then for ELF the file is ET_EXEC rather
1810 than ET_DYN.  Both shared libraries being executed directly and PIE
1811 (Position Independent Executable) executables are ET_DYN. */
1812
1813 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1814 return 0;
1815
1816 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1817 return 0;
1818
1819 displacement = entry_point - bfd_get_start_address (exec_bfd);
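
  /* For example (hypothetical values, for illustration only): a PIE whose
     ELF header records a start address of 0x530 but whose auxv reports
     AT_ENTRY as 0x555555554530 yields a candidate DISPLACEMENT of
     0x555555554530 - 0x530 = 0x555555554000.  */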
1820
1821 /* Verify the DISPLACEMENT candidate complies with the required page
1822 alignment. It is cheaper than the program headers comparison below. */
1823
1824 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1825 {
1826 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1827
1828 /* The p_align of PT_LOAD segments does not specify any alignment but
1829 only congruency of addresses:
1830 p_offset % p_align == p_vaddr % p_align
1831 The kernel is free to load the executable with a lower alignment. */
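
      /* For illustration (values assumed): with a minpagesize of 0x1000,
	 the example displacement 0x555555554000 above satisfies this
	 check, whereas 0x555555554800 would be rejected.  */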
1832
1833 if ((displacement & (elf->minpagesize - 1)) != 0)
1834 return 0;
1835 }
1836
1837 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1838 comparing their program headers.  If the program headers in the auxiliary
1839 vector do not match the program headers in the executable, then we are
1840 looking at a different file than the one used by the kernel - for
1841 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1842
1843 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1844 {
1845 /* Be optimistic and clear OK only if GDB was able to verify the headers
1846 really do not match. */
1847 int phdrs_size, phdrs2_size, ok = 1;
1848 gdb_byte *buf, *buf2;
1849 int arch_size;
1850
1851 buf = read_program_header (-1, &phdrs_size, &arch_size);
1852 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1853 if (buf != NULL && buf2 != NULL)
1854 {
1855 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
1856
1857 /* We are dealing with three different addresses.  EXEC_BFD
1858 represents the current addresses in the on-disk file.  The target
1859 memory content may differ from EXEC_BFD, as the file may have been
1860 prelinked to a different address after the executable was loaded.
1861 Moreover, the address of placement in target memory can differ
1862 from what the program headers in target memory say - that is the
1863 whole point of PIE.
1864
1865 The detected DISPLACEMENT covers both the offset of PIE placement
1866 and any new prelink performed after the program was started.  Here
1867 we relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1868 content offset, for verification purposes. */
1869
1870 if (phdrs_size != phdrs2_size
1871 || bfd_get_arch_size (exec_bfd) != arch_size)
1872 ok = 0;
1873 else if (arch_size == 32
1874 && phdrs_size >= sizeof (Elf32_External_Phdr)
1875 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1876 {
1877 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1878 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1879 CORE_ADDR displacement = 0;
1880 int i;
1881
1882 /* DISPLACEMENT could be found more easily by the difference of
1883 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1884 already have enough information to compute that displacement
1885 with what we've read. */
1886
1887 for (i = 0; i < ehdr2->e_phnum; i++)
1888 if (phdr2[i].p_type == PT_LOAD)
1889 {
1890 Elf32_External_Phdr *phdrp;
1891 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1892 CORE_ADDR vaddr, paddr;
1893 CORE_ADDR displacement_vaddr = 0;
1894 CORE_ADDR displacement_paddr = 0;
1895
1896 phdrp = &((Elf32_External_Phdr *) buf)[i];
1897 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1898 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1899
1900 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1901 byte_order);
1902 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1903
1904 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1905 byte_order);
1906 displacement_paddr = paddr - phdr2[i].p_paddr;
1907
1908 if (displacement_vaddr == displacement_paddr)
1909 displacement = displacement_vaddr;
1910
1911 break;
1912 }
1913
1914 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1915
1916 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1917 {
1918 Elf32_External_Phdr *phdrp;
1919 Elf32_External_Phdr *phdr2p;
1920 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1921 CORE_ADDR vaddr, paddr;
1922 asection *plt2_asect;
1923
1924 phdrp = &((Elf32_External_Phdr *) buf)[i];
1925 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1926 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1927 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1928
1929 /* PT_GNU_STACK is an exception: it is never relocated by
1930 prelink as its addresses are always zero. */
1931
1932 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1933 continue;
1934
1935 /* Check also other adjustment combinations - PR 11786. */
1936
1937 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1938 byte_order);
1939 vaddr -= displacement;
1940 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1941
1942 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1943 byte_order);
1944 paddr -= displacement;
1945 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1946
1947 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1948 continue;
1949
1950 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1951 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1952 if (plt2_asect)
1953 {
1954 int content2;
1955 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1956 CORE_ADDR filesz;
1957
1958 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1959 & SEC_HAS_CONTENTS) != 0;
1960
1961 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1962 byte_order);
1963
1964 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1965 FILESZ is from the in-memory image. */
1966 if (content2)
1967 filesz += bfd_get_section_size (plt2_asect);
1968 else
1969 filesz -= bfd_get_section_size (plt2_asect);
1970
1971 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1972 filesz);
1973
1974 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1975 continue;
1976 }
1977
1978 ok = 0;
1979 break;
1980 }
1981 }
1982 else if (arch_size == 64
1983 && phdrs_size >= sizeof (Elf64_External_Phdr)
1984 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1985 {
1986 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1987 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1988 CORE_ADDR displacement = 0;
1989 int i;
1990
1991 /* DISPLACEMENT could be found more easily by the difference of
1992 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1993 already have enough information to compute that displacement
1994 with what we've read. */
1995
1996 for (i = 0; i < ehdr2->e_phnum; i++)
1997 if (phdr2[i].p_type == PT_LOAD)
1998 {
1999 Elf64_External_Phdr *phdrp;
2000 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2001 CORE_ADDR vaddr, paddr;
2002 CORE_ADDR displacement_vaddr = 0;
2003 CORE_ADDR displacement_paddr = 0;
2004
2005 phdrp = &((Elf64_External_Phdr *) buf)[i];
2006 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2007 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2008
2009 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2010 byte_order);
2011 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2012
2013 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2014 byte_order);
2015 displacement_paddr = paddr - phdr2[i].p_paddr;
2016
2017 if (displacement_vaddr == displacement_paddr)
2018 displacement = displacement_vaddr;
2019
2020 break;
2021 }
2022
2023 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2024
2025 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2026 {
2027 Elf64_External_Phdr *phdrp;
2028 Elf64_External_Phdr *phdr2p;
2029 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2030 CORE_ADDR vaddr, paddr;
2031 asection *plt2_asect;
2032
2033 phdrp = &((Elf64_External_Phdr *) buf)[i];
2034 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2035 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2036 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2037
2038 /* PT_GNU_STACK is an exception: it is never relocated by
2039 prelink as its addresses are always zero. */
2040
2041 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2042 continue;
2043
2044 /* Check also other adjustment combinations - PR 11786. */
2045
2046 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2047 byte_order);
2048 vaddr -= displacement;
2049 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2050
2051 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2052 byte_order);
2053 paddr -= displacement;
2054 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2055
2056 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2057 continue;
2058
2059 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2060 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2061 if (plt2_asect)
2062 {
2063 int content2;
2064 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2065 CORE_ADDR filesz;
2066
2067 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2068 & SEC_HAS_CONTENTS) != 0;
2069
2070 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2071 byte_order);
2072
2073 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2074 FILESZ is from the in-memory image. */
2075 if (content2)
2076 filesz += bfd_get_section_size (plt2_asect);
2077 else
2078 filesz -= bfd_get_section_size (plt2_asect);
2079
2080 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2081 filesz);
2082
2083 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2084 continue;
2085 }
2086
2087 ok = 0;
2088 break;
2089 }
2090 }
2091 else
2092 ok = 0;
2093 }
2094
2095 xfree (buf);
2096 xfree (buf2);
2097
2098 if (!ok)
2099 return 0;
2100 }
2101
2102 if (info_verbose)
2103 {
2104 /* This message can be printed repeatedly, as there is no easy way to
2105 check whether the executable's symbols/file have already been
2106 relocated by the displacement. */
2107
2108 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2109 "displacement %s for \"%s\".\n"),
2110 paddress (target_gdbarch, displacement),
2111 bfd_get_filename (exec_bfd));
2112 }
2113
2114 *displacementp = displacement;
2115 return 1;
2116 }
2117
2118 /* Relocate the main executable. This function should be called upon
2119 stopping the inferior process at the entry point to the program.
2120 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2121 different, the main executable is relocated by the proper amount. */
2122
2123 static void
2124 svr4_relocate_main_executable (void)
2125 {
2126 CORE_ADDR displacement;
2127
2128 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2129 probably contains the offsets computed using the PIE displacement
2130 from the previous run, which of course are irrelevant for this run.
2131 So we need to determine the new PIE displacement and recompute the
2132 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2133 already contains pre-computed offsets.
2134
2135 If we cannot compute the PIE displacement, either:
2136
2137 - The executable is not PIE.
2138
2139 - SYMFILE_OBJFILE does not match the executable started in the target.
2140 This can happen for main executable symbols loaded at the host while
2141 `ld.so --ld-args main-executable' is loaded in the target.
2142
2143 Then we leave the section offsets untouched and use them as is for
2144 this run. Either:
2145
2146 - These section offsets were properly reset earlier, and thus
2147 already contain the correct values. This can happen for instance
2148 when reconnecting via the remote protocol to a target that supports
2149 the `qOffsets' packet.
2150
2151 - The section offsets were not reset earlier, and the best we can
2152 hope is that the old offsets are still applicable to the new run. */
2153
2154 if (! svr4_exec_displacement (&displacement))
2155 return;
2156
2157 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2158 addresses. */
2159
2160 if (symfile_objfile)
2161 {
2162 struct section_offsets *new_offsets;
2163 int i;
2164
2165 new_offsets = alloca (symfile_objfile->num_sections
2166 * sizeof (*new_offsets));
2167
2168 for (i = 0; i < symfile_objfile->num_sections; i++)
2169 new_offsets->offsets[i] = displacement;
2170
2171 objfile_relocate (symfile_objfile, new_offsets);
2172 }
2173 else if (exec_bfd)
2174 {
2175 asection *asect;
2176
2177 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2178 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2179 (bfd_section_vma (exec_bfd, asect)
2180 + displacement));
2181 }
2182 }
2183
2184 /* Implement the "create_inferior_hook" target_so_ops method.
2185
2186 For SVR4 executables, the first instruction executed in the inferior
2187 is either the first instruction in the dynamic linker (for dynamically
2188 linked executables) or the instruction at "start" for statically
2189 linked executables.  For dynamically linked executables, the system
2190 first exec's /lib/libc.so.N, which contains the dynamic linker,
2191 and starts it running. The dynamic linker maps in any needed
2192 shared libraries, maps in the actual user executable, and then
2193 jumps to "start" in the user executable.
2194
2195 We can arrange to cooperate with the dynamic linker to discover the
2196 names of shared libraries that are dynamically linked, and the base
2197 addresses to which they are linked.
2198
2199 This function is responsible for discovering those names and
2200 addresses, and saving sufficient information about them to allow
2201 their symbols to be read at a later time.
2202
2203 FIXME
2204
2205 Between enable_break() and disable_break(), this code does not
2206 properly handle hitting breakpoints which the user might have
2207 set in the startup code or in the dynamic linker itself. Proper
2208 handling will probably have to wait until the implementation is
2209 changed to use the "breakpoint handler function" method.
2210
2211 Also, what if child has exit()ed? Must exit loop somehow. */
2212
2213 static void
2214 svr4_solib_create_inferior_hook (int from_tty)
2215 {
2216 #if defined(_SCO_DS)
2217 struct inferior *inf;
2218 struct thread_info *tp;
2219 #endif /* defined(_SCO_DS) */
2220 struct svr4_info *info;
2221
2222 info = get_svr4_info ();
2223
2224 /* Relocate the main executable if necessary. */
2225 svr4_relocate_main_executable ();
2226
2227 /* No point setting a breakpoint in the dynamic linker if we can't
2228 hit it (e.g., a core file, or a trace file). */
2229 if (!target_has_execution)
2230 return;
2231
2232 if (!svr4_have_link_map_offsets ())
2233 return;
2234
2235 if (!enable_break (info, from_tty))
2236 return;
2237
2238 #if defined(_SCO_DS)
2239 /* SCO needs the loop below; other systems should be using the
2240 special shared library breakpoints and the shared library breakpoint
2241 service routine.
2242
2243 Now run the target. It will eventually hit the breakpoint, at
2244 which point all of the libraries will have been mapped in and we
2245 can go groveling around in the dynamic linker structures to find
2246 out what we need to know about them. */
2247
2248 inf = current_inferior ();
2249 tp = inferior_thread ();
2250
2251 clear_proceed_status ();
2252 inf->control.stop_soon = STOP_QUIETLY;
2253 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2254 do
2255 {
2256 target_resume (pid_to_ptid (-1), 0, tp->suspend.stop_signal);
2257 wait_for_inferior ();
2258 }
2259 while (tp->suspend.stop_signal != TARGET_SIGNAL_TRAP);
2260 inf->control.stop_soon = NO_STOP_QUIETLY;
2261 #endif /* defined(_SCO_DS) */
2262 }
2263
2264 static void
2265 svr4_clear_solib (void)
2266 {
2267 struct svr4_info *info;
2268
2269 info = get_svr4_info ();
2270 info->debug_base = 0;
2271 info->debug_loader_offset_p = 0;
2272 info->debug_loader_offset = 0;
2273 xfree (info->debug_loader_name);
2274 info->debug_loader_name = NULL;
2275 }
2276
2277 /* Clear any bits of ADDR that wouldn't fit in a target-format
2278 data pointer. "Data pointer" here refers to whatever sort of
2279 address the dynamic linker uses to manage its sections. At the
2280 moment, we don't support shared libraries on any processors where
2281 code and data pointers are different sizes.
2282
2283 This isn't really the right solution. What we really need here is
2284 a way to do arithmetic on CORE_ADDR values that respects the
2285 natural pointer/address correspondence. (For example, on the MIPS,
2286 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2287 sign-extend the value. There, simply truncating the bits above
2288 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2289 be a new gdbarch method or something. */
2290 static CORE_ADDR
2291 svr4_truncate_ptr (CORE_ADDR addr)
2292 {
2293 if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
2294 /* We don't need to truncate anything, and the bit twiddling below
2295 will fail due to overflow problems. */
2296 return addr;
2297 else
2298 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
2299 }
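
/* For instance (hypothetical values): on a configuration where
   gdbarch_ptr_bit is 32 but CORE_ADDR is 64 bits wide, an address of
   0xffffffff80001234 would be truncated by the function above to
   0x80001234.  */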
2300
2301
2302 static void
2303 svr4_relocate_section_addresses (struct so_list *so,
2304 struct target_section *sec)
2305 {
2306 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2307 sec->bfd));
2308 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2309 sec->bfd));
2310 }
2311 \f
2312
2313 /* Architecture-specific operations. */
2314
2315 /* Per-architecture data key. */
2316 static struct gdbarch_data *solib_svr4_data;
2317
2318 struct solib_svr4_ops
2319 {
2320 /* Return a description of the layout of `struct link_map'. */
2321 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2322 };
2323
2324 /* Return a default for the architecture-specific operations. */
2325
2326 static void *
2327 solib_svr4_init (struct obstack *obstack)
2328 {
2329 struct solib_svr4_ops *ops;
2330
2331 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2332 ops->fetch_link_map_offsets = NULL;
2333 return ops;
2334 }
2335
2336 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2337 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2338
2339 void
2340 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2341 struct link_map_offsets *(*flmo) (void))
2342 {
2343 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2344
2345 ops->fetch_link_map_offsets = flmo;
2346
2347 set_solib_ops (gdbarch, &svr4_so_ops);
2348 }
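
/* An architecture's *-tdep.c typically installs its fetcher from its
   gdbarch initialization routine, roughly as follows (an illustrative
   sketch, not a quotation of any particular tdep file):

     set_solib_svr4_fetch_link_map_offsets
       (gdbarch, svr4_ilp32_fetch_link_map_offsets);

   Besides recording the fetcher, this also installs svr4_so_ops as the
   gdbarch's shared library operations via set_solib_ops.  */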
2349
2350 /* Fetch a link_map_offsets structure using the architecture-specific
2351 `struct link_map_offsets' fetcher. */
2352
2353 static struct link_map_offsets *
2354 svr4_fetch_link_map_offsets (void)
2355 {
2356 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2357
2358 gdb_assert (ops->fetch_link_map_offsets);
2359 return ops->fetch_link_map_offsets ();
2360 }
2361
2362 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2363
2364 static int
2365 svr4_have_link_map_offsets (void)
2366 {
2367 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2368
2369 return (ops->fetch_link_map_offsets != NULL);
2370 }
2371 \f
2372
2373 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2374 `struct r_debug' and a `struct link_map' that are binary compatible
2375 with the original SVR4 implementation. */
2376
2377 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2378 for an ILP32 SVR4 system. */
2379
2380 struct link_map_offsets *
2381 svr4_ilp32_fetch_link_map_offsets (void)
2382 {
2383 static struct link_map_offsets lmo;
2384 static struct link_map_offsets *lmp = NULL;
2385
2386 if (lmp == NULL)
2387 {
2388 lmp = &lmo;
2389
2390 lmo.r_version_offset = 0;
2391 lmo.r_version_size = 4;
2392 lmo.r_map_offset = 4;
2393 lmo.r_brk_offset = 8;
2394 lmo.r_ldsomap_offset = 20;
2395
2396 /* Everything we need is in the first 20 bytes. */
2397 lmo.link_map_size = 20;
2398 lmo.l_addr_offset = 0;
2399 lmo.l_name_offset = 4;
2400 lmo.l_ld_offset = 8;
2401 lmo.l_next_offset = 12;
2402 lmo.l_prev_offset = 16;
2403 }
2404
2405 return lmp;
2406 }
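
/* For reference, the ILP32 offsets above correspond to structures laid
   out roughly as follows (a sketch only; field names follow the
   SVR4/glibc <link.h> convention and only the fields GDB reads are shown):

     struct r_debug  { int r_version;              offset  0, size 4
		       struct link_map *r_map;     offset  4
		       Elf32_Addr r_brk;           offset  8
		       ... r_ldsomap ...           offset 20  };

     struct link_map { Elf32_Addr l_addr;          offset  0
		       char *l_name;               offset  4
		       Elf32_Dyn *l_ld;            offset  8
		       struct link_map *l_next;    offset 12
		       struct link_map *l_prev;    offset 16  };  */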
2407
2408 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2409 for an LP64 SVR4 system. */
2410
2411 struct link_map_offsets *
2412 svr4_lp64_fetch_link_map_offsets (void)
2413 {
2414 static struct link_map_offsets lmo;
2415 static struct link_map_offsets *lmp = NULL;
2416
2417 if (lmp == NULL)
2418 {
2419 lmp = &lmo;
2420
2421 lmo.r_version_offset = 0;
2422 lmo.r_version_size = 4;
2423 lmo.r_map_offset = 8;
2424 lmo.r_brk_offset = 16;
2425 lmo.r_ldsomap_offset = 40;
2426
2427 /* Everything we need is in the first 40 bytes. */
2428 lmo.link_map_size = 40;
2429 lmo.l_addr_offset = 0;
2430 lmo.l_name_offset = 8;
2431 lmo.l_ld_offset = 16;
2432 lmo.l_next_offset = 24;
2433 lmo.l_prev_offset = 32;
2434 }
2435
2436 return lmp;
2437 }
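
/* The LP64 offsets above mirror the ILP32 sketch with 8-byte pointers:
   r_map and r_brk land at offsets 8 and 16, and the five link_map fields
   GDB reads occupy the first 40 bytes.  */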
2438 \f
2439
2440 struct target_so_ops svr4_so_ops;
2441
2442 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic.  Those DSOs
2443 have a different rule for symbol lookup: the lookup begins here in the DSO,
2444 not in the main executable. */
2445
2446 static struct symbol *
2447 elf_lookup_lib_symbol (const struct objfile *objfile,
2448 const char *name,
2449 const domain_enum domain)
2450 {
2451 bfd *abfd;
2452
2453 if (objfile == symfile_objfile)
2454 abfd = exec_bfd;
2455 else
2456 {
2457 /* OBJFILE should have been passed as the non-debug one. */
2458 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2459
2460 abfd = objfile->obfd;
2461 }
2462
2463 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2464 return NULL;
2465
2466 return lookup_global_symbol_from_objfile (objfile, name, domain);
2467 }
2468
2469 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2470
2471 void
2472 _initialize_svr4_solib (void)
2473 {
2474 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2475 solib_svr4_pspace_data
2476 = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2477
2478 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2479 svr4_so_ops.free_so = svr4_free_so;
2480 svr4_so_ops.clear_solib = svr4_clear_solib;
2481 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2482 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2483 svr4_so_ops.current_sos = svr4_current_sos;
2484 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2485 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2486 svr4_so_ops.bfd_open = solib_bfd_open;
2487 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2488 svr4_so_ops.same = svr4_same;
2489 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2490 }