1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "regcache.h"
34 #include "gdbthread.h"
35 #include "observer.h"
36
37 #include "gdb_assert.h"
38
39 #include "solist.h"
40 #include "solib.h"
41 #include "solib-svr4.h"
42
43 #include "bfd-target.h"
44 #include "elf-bfd.h"
45 #include "exec.h"
46 #include "auxv.h"
47 #include "exceptions.h"
48 #include "gdb_bfd.h"
49
50 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
51 static int svr4_have_link_map_offsets (void);
52 static void svr4_relocate_main_executable (void);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
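/* For reference, the inferior-side structure these fields mirror is the
   SVR4/glibc `struct link_map' from <link.h>.  A minimal sketch, assuming a
   glibc-style layout (the offsets GDB actually reads always come from
   svr4_fetch_link_map_offsets, so this is illustrative only):

       struct link_map
       {
         ElfW(Addr) l_addr;                -- load bias of the object
         char *l_name;                     -- absolute pathname in the inferior
         ElfW(Dyn) *l_ld;                  -- address of its dynamic section
         struct link_map *l_next, *l_prev; -- doubly-linked chain of objects
       };  */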
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting the inferior we think that the dynamic linker
119 is /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
121 sometimes they merely have identical content but are not linked to each
122 other. We don't restrict this check to Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135 }
136
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142
143 static struct lm_info *
144 lm_info_read (CORE_ADDR lm_addr)
145 {
146 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
147 gdb_byte *lm;
148 struct lm_info *lm_info;
149 struct cleanup *back_to;
150
151 lm = xmalloc (lmo->link_map_size);
152 back_to = make_cleanup (xfree, lm);
153
154 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
155 {
156 warning (_("Error reading shared library list entry at %s"),
157 paddress (target_gdbarch (), lm_addr));
158 lm_info = NULL;
159 }
160 else
161 {
162 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
163
164 lm_info = xzalloc (sizeof (*lm_info));
165 lm_info->lm_addr = lm_addr;
166
167 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
168 ptr_type);
169 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
170 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
171 ptr_type);
172 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
173 ptr_type);
174 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
175 ptr_type);
176 }
177
178 do_cleanups (back_to);
179
180 return lm_info;
181 }
182
183 static int
184 has_lm_dynamic_from_link_map (void)
185 {
186 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
187
188 return lmo->l_ld_offset >= 0;
189 }
190
191 static CORE_ADDR
192 lm_addr_check (struct so_list *so, bfd *abfd)
193 {
194 if (!so->lm_info->l_addr_p)
195 {
196 struct bfd_section *dyninfo_sect;
197 CORE_ADDR l_addr, l_dynaddr, dynaddr;
198
199 l_addr = so->lm_info->l_addr_inferior;
200
201 if (! abfd || ! has_lm_dynamic_from_link_map ())
202 goto set_addr;
203
204 l_dynaddr = so->lm_info->l_ld;
205
206 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
207 if (dyninfo_sect == NULL)
208 goto set_addr;
209
210 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
211
212 if (dynaddr + l_addr != l_dynaddr)
213 {
214 CORE_ADDR align = 0x1000;
215 CORE_ADDR minpagesize = align;
216
217 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
218 {
219 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
220 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
221 int i;
222
223 align = 1;
224
225 for (i = 0; i < ehdr->e_phnum; i++)
226 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
227 align = phdr[i].p_align;
228
229 minpagesize = get_elf_backend_data (abfd)->minpagesize;
230 }
231
232 /* Turn it into a mask. */
233 align--;
234
235 /* If the changes match the alignment requirements, we
236 assume we're using a core file that was generated by the
237 same binary, just prelinked with a different base offset.
238 If it doesn't match, we may have a different binary, the
239 same binary with the dynamic table loaded at an unrelated
240 location, or anything, really. To avoid regressions,
241 don't adjust the base offset in the latter case, although
242 odds are that, if things really changed, debugging won't
243 quite work.
244
245 One might rather expect the condition
246 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
247 but the one below is relaxed for PPC. The PPC kernel supports
248 either 4k or 64k page sizes. To be prepared for 64k pages,
249 PPC ELF files are built using an alignment requirement of 64k.
250 However, when running on a kernel supporting 4k pages, the memory
251 mapping of the library may not actually happen on a 64k boundary!
252
253 (In the usual case where (l_addr & align) == 0, this check is
254 equivalent to the possibly expected check above.)
255
256 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
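/* Illustrative example with made-up numbers: if the file VMA of the
   .dynamic section is dynaddr == 0x1df0c8 while the inferior's l_ld
   reports l_dynaddr == 0x7ffff7bdf0c8, the candidate displacement is
   l_dynaddr - dynaddr == 0x7ffff7a00000.  That value is aligned to
   MINPAGESIZE (and to ALIGN), so it is accepted below and reported
   as a PIC/prelink displacement; an unaligned difference would
   instead produce the "wrong library or version mismatch?" warning.  */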
257
258 l_addr = l_dynaddr - dynaddr;
259
260 if ((l_addr & (minpagesize - 1)) == 0
261 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
262 {
263 if (info_verbose)
264 printf_unfiltered (_("Using PIC (Position Independent Code) "
265 "prelink displacement %s for \"%s\".\n"),
266 paddress (target_gdbarch (), l_addr),
267 so->so_name);
268 }
269 else
270 {
271 /* There is no way to verify that the library file matches. While
272 prelinking an unprelinked file (or unprelinking a prelinked
273 file), prelink can shift the DYNAMIC segment by an arbitrary
274 offset without any page size alignment. There is no way to
275 read the ELF header and/or Program Headers of the in-memory
276 copy for even a limited verification that they match. One could
277 verify the DYNAMIC segment instead. Still, the address found
278 here is the best one GDB could find. */
279
280 warning (_(".dynamic section for \"%s\" "
281 "is not at the expected address "
282 "(wrong library or version mismatch?)"), so->so_name);
283 }
284 }
285
286 set_addr:
287 so->lm_info->l_addr = l_addr;
288 so->lm_info->l_addr_p = 1;
289 }
290
291 return so->lm_info->l_addr;
292 }
293
294 /* Per pspace SVR4 specific data. */
295
296 struct svr4_info
297 {
298 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
299
300 /* Validity flag for debug_loader_offset. */
301 int debug_loader_offset_p;
302
303 /* Load address for the dynamic linker, inferred. */
304 CORE_ADDR debug_loader_offset;
305
306 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
307 char *debug_loader_name;
308
309 /* Load map address for the main executable. */
310 CORE_ADDR main_lm_addr;
311
312 CORE_ADDR interp_text_sect_low;
313 CORE_ADDR interp_text_sect_high;
314 CORE_ADDR interp_plt_sect_low;
315 CORE_ADDR interp_plt_sect_high;
316 };
317
318 /* Per-program-space data key. */
319 static const struct program_space_data *solib_svr4_pspace_data;
320
321 static void
322 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
323 {
324 struct svr4_info *info;
325
326 info = program_space_data (pspace, solib_svr4_pspace_data);
327 xfree (info);
328 }
329
330 /* Get the current svr4 data. If none is found yet, add it now. This
331 function always returns a valid object. */
332
333 static struct svr4_info *
334 get_svr4_info (void)
335 {
336 struct svr4_info *info;
337
338 info = program_space_data (current_program_space, solib_svr4_pspace_data);
339 if (info != NULL)
340 return info;
341
342 info = XZALLOC (struct svr4_info);
343 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
344 return info;
345 }
346
347 /* Local function prototypes */
348
349 static int match_main (const char *);
350
351 /* Read program header TYPE from inferior memory. The header is found
352 by scanning the OS auxiliary vector.
353
354 If TYPE == -1, return the program headers instead of the contents of
355 one program header.
356
357 Return a pointer to allocated memory holding the program header contents,
358 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
359 size of those contents is returned in *P_SECT_SIZE. Likewise, the target
360 architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
361
362 static gdb_byte *
363 read_program_header (int type, int *p_sect_size, int *p_arch_size)
364 {
365 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
366 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
367 int arch_size, sect_size;
368 CORE_ADDR sect_addr;
369 gdb_byte *buf;
370 int pt_phdr_p = 0;
371
372 /* Get required auxv elements from target. */
373 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
374 return 0;
375 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
376 return 0;
377 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
378 return 0;
379 if (!at_phdr || !at_phnum)
380 return 0;
381
382 /* Determine ELF architecture type. */
383 if (at_phent == sizeof (Elf32_External_Phdr))
384 arch_size = 32;
385 else if (at_phent == sizeof (Elf64_External_Phdr))
386 arch_size = 64;
387 else
388 return 0;
389
390 /* Find the requested segment. */
391 if (type == -1)
392 {
393 sect_addr = at_phdr;
394 sect_size = at_phent * at_phnum;
395 }
396 else if (arch_size == 32)
397 {
398 Elf32_External_Phdr phdr;
399 int i;
400
401 /* Search for requested PHDR. */
402 for (i = 0; i < at_phnum; i++)
403 {
404 int p_type;
405
406 if (target_read_memory (at_phdr + i * sizeof (phdr),
407 (gdb_byte *)&phdr, sizeof (phdr)))
408 return 0;
409
410 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
411 4, byte_order);
412
413 if (p_type == PT_PHDR)
414 {
415 pt_phdr_p = 1;
416 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
417 4, byte_order);
418 }
419
420 if (p_type == type)
421 break;
422 }
423
424 if (i == at_phnum)
425 return 0;
426
427 /* Retrieve address and size. */
428 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
429 4, byte_order);
430 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
431 4, byte_order);
432 }
433 else
434 {
435 Elf64_External_Phdr phdr;
436 int i;
437
438 /* Search for requested PHDR. */
439 for (i = 0; i < at_phnum; i++)
440 {
441 int p_type;
442
443 if (target_read_memory (at_phdr + i * sizeof (phdr),
444 (gdb_byte *)&phdr, sizeof (phdr)))
445 return 0;
446
447 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
448 4, byte_order);
449
450 if (p_type == PT_PHDR)
451 {
452 pt_phdr_p = 1;
453 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
454 8, byte_order);
455 }
456
457 if (p_type == type)
458 break;
459 }
460
461 if (i == at_phnum)
462 return 0;
463
464 /* Retrieve address and size. */
465 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
466 8, byte_order);
467 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
468 8, byte_order);
469 }
470
471 /* PT_PHDR is optional, but we really need it
472 for PIE to make this work in general. */
473
474 if (pt_phdr_p)
475 {
476 /* at_phdr is the real address in memory. pt_phdr is what the program
477 header says it is. The relocation offset is the difference between the two. */
478 sect_addr = sect_addr + (at_phdr - pt_phdr);
479 }
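/* Illustrative example with made-up numbers: for a PIE loaded with a
   0x555555554000 bias, the auxv might report AT_PHDR == 0x555555554040
   while PT_PHDR's p_vaddr is 0x40; the difference, 0x555555554000, is
   added to the link-time p_vaddr of the requested segment to obtain
   its actual address in the inferior.  */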
480
481 /* Read in requested program header. */
482 buf = xmalloc (sect_size);
483 if (target_read_memory (sect_addr, buf, sect_size))
484 {
485 xfree (buf);
486 return NULL;
487 }
488
489 if (p_arch_size)
490 *p_arch_size = arch_size;
491 if (p_sect_size)
492 *p_sect_size = sect_size;
493
494 return buf;
495 }
496
497
498 /* Return program interpreter string. */
499 static gdb_byte *
500 find_program_interpreter (void)
501 {
502 gdb_byte *buf = NULL;
503
504 /* If we have an exec_bfd, use its section table. */
505 if (exec_bfd
506 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
507 {
508 struct bfd_section *interp_sect;
509
510 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
511 if (interp_sect != NULL)
512 {
513 int sect_size = bfd_section_size (exec_bfd, interp_sect);
514
515 buf = xmalloc (sect_size);
516 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
517 }
518 }
519
520 /* If we didn't find it, use the target auxiliary vector. */
521 if (!buf)
522 buf = read_program_header (PT_INTERP, NULL, NULL);
523
524 return buf;
525 }
526
527
528 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
529 returned and the corresponding PTR is set. */
530
531 static int
532 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
533 {
534 int arch_size, step, sect_size;
535 long dyn_tag;
536 CORE_ADDR dyn_ptr, dyn_addr;
537 gdb_byte *bufend, *bufstart, *buf;
538 Elf32_External_Dyn *x_dynp_32;
539 Elf64_External_Dyn *x_dynp_64;
540 struct bfd_section *sect;
541 struct target_section *target_section;
542
543 if (abfd == NULL)
544 return 0;
545
546 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
547 return 0;
548
549 arch_size = bfd_get_arch_size (abfd);
550 if (arch_size == -1)
551 return 0;
552
553 /* Find the start address of the .dynamic section. */
554 sect = bfd_get_section_by_name (abfd, ".dynamic");
555 if (sect == NULL)
556 return 0;
557
558 for (target_section = current_target_sections->sections;
559 target_section < current_target_sections->sections_end;
560 target_section++)
561 if (sect == target_section->the_bfd_section)
562 break;
563 if (target_section < current_target_sections->sections_end)
564 dyn_addr = target_section->addr;
565 else
566 {
567 /* ABFD may come from OBJFILE acting only as a symbol file without being
568 loaded into the target (see add_symbol_file_command). In such a case,
569 fall back to the file VMA address, without the possibility of
570 having the section relocated to its actual in-memory address. */
571
572 dyn_addr = bfd_section_vma (abfd, sect);
573 }
574
575 /* Read in .dynamic from the BFD. We will get the actual value
576 from memory later. */
577 sect_size = bfd_section_size (abfd, sect);
578 buf = bufstart = alloca (sect_size);
579 if (!bfd_get_section_contents (abfd, sect,
580 buf, 0, sect_size))
581 return 0;
582
583 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
584 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
585 : sizeof (Elf64_External_Dyn);
586 for (bufend = buf + sect_size;
587 buf < bufend;
588 buf += step)
589 {
590 if (arch_size == 32)
591 {
592 x_dynp_32 = (Elf32_External_Dyn *) buf;
593 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
594 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
595 }
596 else
597 {
598 x_dynp_64 = (Elf64_External_Dyn *) buf;
599 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
600 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
601 }
602 if (dyn_tag == DT_NULL)
603 return 0;
604 if (dyn_tag == dyntag)
605 {
606 /* If requested, try to read the runtime value of this .dynamic
607 entry. */
608 if (ptr)
609 {
610 struct type *ptr_type;
611 gdb_byte ptr_buf[8];
612 CORE_ADDR ptr_addr;
613
614 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
615 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
616 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
617 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
618 *ptr = dyn_ptr;
619 }
620 return 1;
621 }
622 }
623
624 return 0;
625 }
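/* Illustrative (hypothetical) use: to find the runtime address of the
   dynamic linker's debug structure one would call

       CORE_ADDR dyn_ptr;
       if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr))
         ...  -- dyn_ptr now holds DT_DEBUG's d_ptr value, which the
                 dynamic linker fills in at startup

   which is essentially what elf_locate_base below does.  */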
626
627 /* Scan for DYNTAG in .dynamic section of the target's main executable,
628 found by consulting the OS auxiliary vector. If DYNTAG is found 1 is
629 returned and the corresponding PTR is set. */
630
631 static int
632 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
633 {
634 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
635 int sect_size, arch_size, step;
636 long dyn_tag;
637 CORE_ADDR dyn_ptr;
638 gdb_byte *bufend, *bufstart, *buf;
639
640 /* Read in .dynamic section. */
641 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
642 if (!buf)
643 return 0;
644
645 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
646 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
647 : sizeof (Elf64_External_Dyn);
648 for (bufend = buf + sect_size;
649 buf < bufend;
650 buf += step)
651 {
652 if (arch_size == 32)
653 {
654 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
655
656 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
657 4, byte_order);
658 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
659 4, byte_order);
660 }
661 else
662 {
663 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
664
665 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
666 8, byte_order);
667 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
668 8, byte_order);
669 }
670 if (dyn_tag == DT_NULL)
671 break;
672
673 if (dyn_tag == dyntag)
674 {
675 if (ptr)
676 *ptr = dyn_ptr;
677
678 xfree (bufstart);
679 return 1;
680 }
681 }
682
683 xfree (bufstart);
684 return 0;
685 }
686
687 /* Locate the base address of dynamic linker structs for SVR4 elf
688 targets.
689
690 For SVR4 elf targets the address of the dynamic linker's runtime
691 structure is contained within the dynamic info section in the
692 executable file. The dynamic section is also mapped into the
693 inferior address space. Because the runtime loader fills in the
694 real address before starting the inferior, we have to read in the
695 dynamic info section from the inferior address space.
696 If there are any errors while trying to find the address, we
697 silently return 0, otherwise the found address is returned. */
698
699 static CORE_ADDR
700 elf_locate_base (void)
701 {
702 struct minimal_symbol *msymbol;
703 CORE_ADDR dyn_ptr;
704
705 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
706 instead of DT_DEBUG, although they sometimes contain an unused
707 DT_DEBUG. */
708 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
709 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
710 {
711 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
712 gdb_byte *pbuf;
713 int pbuf_size = TYPE_LENGTH (ptr_type);
714
715 pbuf = alloca (pbuf_size);
716 /* DT_MIPS_RLD_MAP contains a pointer to the address
717 of the dynamic link structure. */
718 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
719 return 0;
720 return extract_typed_address (pbuf, ptr_type);
721 }
722
723 /* Find DT_DEBUG. */
724 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
725 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
726 return dyn_ptr;
727
728 /* This may be a static executable. Look for the symbol
729 conventionally named _r_debug, as a last resort. */
730 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
731 if (msymbol != NULL)
732 return SYMBOL_VALUE_ADDRESS (msymbol);
733
734 /* DT_DEBUG entry not found. */
735 return 0;
736 }
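/* For reference, the structure whose address elf_locate_base returns is
   the SVR4 `struct r_debug' declared in <link.h>.  A minimal sketch,
   assuming the usual glibc layout (the offsets GDB actually reads come
   from svr4_fetch_link_map_offsets, so this is illustrative only):

       struct r_debug
       {
         int r_version;           -- protocol version, checked in
                                     solib_svr4_r_ldsomap below
         struct link_map *r_map;  -- head of the loaded-object chain
         ElfW(Addr) r_brk;        -- address of the debugger hook function
         enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
         ElfW(Addr) r_ldbase;     -- base address of the dynamic linker
       };  */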
737
738 /* Locate the base address of dynamic linker structs.
739
740 For both the SunOS and SVR4 shared library implementations, if the
741 inferior executable has been linked dynamically, there is a single
742 address somewhere in the inferior's data space which is the key to
743 locating all of the dynamic linker's runtime structures. This
744 address is the value of the debug base symbol. The job of this
745 function is to find and return that address, or to return 0 if there
746 is no such address (the executable is statically linked for example).
747
748 For SunOS, the job is almost trivial, since the dynamic linker and
749 all of its structures are statically linked to the executable at
750 link time. Thus the symbol for the address we are looking for has
751 already been added to the minimal symbol table for the executable's
752 objfile at the time the symbol file's symbols were read, and all we
753 have to do is look it up there. Note that we explicitly do NOT want
754 to find the copies in the shared library.
755
756 The SVR4 version is a bit more complicated because the address
757 is contained somewhere in the dynamic info section. We have to go
758 to a lot more work to discover the address of the debug base symbol.
759 Because of this complexity, we cache the value we find and return that
760 value on subsequent invocations. Note there is no copy in the
761 executable symbol tables. */
762
763 static CORE_ADDR
764 locate_base (struct svr4_info *info)
765 {
766 /* Check to see if we have a currently valid address, and if so, avoid
767 doing all this work again and just return the cached address. If
768 we have no cached address, try to locate it in the dynamic info
769 section for ELF executables. There's no point in doing any of this
770 though if we don't have some link map offsets to work with. */
771
772 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
773 info->debug_base = elf_locate_base ();
774 return info->debug_base;
775 }
776
777 /* Find the first element in the inferior's dynamic link map, and
778 return its address in the inferior. Return zero if the address
779 could not be determined.
780
781 FIXME: Perhaps we should validate the info somehow, perhaps by
782 checking r_version for a known version number, or r_state for
783 RT_CONSISTENT. */
784
785 static CORE_ADDR
786 solib_svr4_r_map (struct svr4_info *info)
787 {
788 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
789 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
790 CORE_ADDR addr = 0;
791 volatile struct gdb_exception ex;
792
793 TRY_CATCH (ex, RETURN_MASK_ERROR)
794 {
795 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
796 ptr_type);
797 }
798 exception_print (gdb_stderr, ex);
799 return addr;
800 }
801
802 /* Find r_brk from the inferior's debug base. */
803
804 static CORE_ADDR
805 solib_svr4_r_brk (struct svr4_info *info)
806 {
807 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
808 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
809
810 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
811 ptr_type);
812 }
813
814 /* Find the link map for the dynamic linker (if it is not in the
815 normal list of loaded shared objects). */
816
817 static CORE_ADDR
818 solib_svr4_r_ldsomap (struct svr4_info *info)
819 {
820 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
821 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
822 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
823 ULONGEST version;
824
825 /* Check version, and return zero if `struct r_debug' doesn't have
826 the r_ldsomap member. */
827 version
828 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
829 lmo->r_version_size, byte_order);
830 if (version < 2 || lmo->r_ldsomap_offset == -1)
831 return 0;
832
833 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
834 ptr_type);
835 }
836
837 /* On Solaris systems with some versions of the dynamic linker,
838 ld.so's l_name pointer points to the SONAME in the string table
839 rather than into writable memory. So that GDB can find shared
840 libraries when loading a core file generated by gcore, ensure that
841 memory areas containing the l_name string are saved in the core
842 file. */
843
844 static int
845 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
846 {
847 struct svr4_info *info;
848 CORE_ADDR ldsomap;
849 struct so_list *new;
850 struct cleanup *old_chain;
851 struct link_map_offsets *lmo;
852 CORE_ADDR name_lm;
853
854 info = get_svr4_info ();
855
856 info->debug_base = 0;
857 locate_base (info);
858 if (!info->debug_base)
859 return 0;
860
861 ldsomap = solib_svr4_r_ldsomap (info);
862 if (!ldsomap)
863 return 0;
864
865 lmo = svr4_fetch_link_map_offsets ();
866 new = XZALLOC (struct so_list);
867 old_chain = make_cleanup (xfree, new);
868 new->lm_info = lm_info_read (ldsomap);
869 make_cleanup (xfree, new->lm_info);
870 name_lm = new->lm_info ? new->lm_info->l_name : 0;
871 do_cleanups (old_chain);
872
873 return (name_lm >= vaddr && name_lm < vaddr + size);
874 }
875
876 /* Implement the "open_symbol_file_object" target_so_ops method.
877
878 If no open symbol file, attempt to locate and open the main symbol
879 file. On SVR4 systems, this is the first link map entry. If its
880 name is here, we can open it. Useful when attaching to a process
881 without first loading its symbol file. */
882
883 static int
884 open_symbol_file_object (void *from_ttyp)
885 {
886 CORE_ADDR lm, l_name;
887 char *filename;
888 int errcode;
889 int from_tty = *(int *)from_ttyp;
890 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
891 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
892 int l_name_size = TYPE_LENGTH (ptr_type);
893 gdb_byte *l_name_buf = xmalloc (l_name_size);
894 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
895 struct svr4_info *info = get_svr4_info ();
896
897 if (symfile_objfile)
898 if (!query (_("Attempt to reload symbols from process? ")))
899 {
900 do_cleanups (cleanups);
901 return 0;
902 }
903
904 /* Always locate the debug struct, in case it has moved. */
905 info->debug_base = 0;
906 if (locate_base (info) == 0)
907 {
908 do_cleanups (cleanups);
909 return 0; /* failed somehow... */
910 }
911
912 /* First link map member should be the executable. */
913 lm = solib_svr4_r_map (info);
914 if (lm == 0)
915 {
916 do_cleanups (cleanups);
917 return 0; /* failed somehow... */
918 }
919
920 /* Read address of name from target memory to GDB. */
921 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
922
923 /* Convert the address to host format. */
924 l_name = extract_typed_address (l_name_buf, ptr_type);
925
926 if (l_name == 0)
927 {
928 do_cleanups (cleanups);
929 return 0; /* No filename. */
930 }
931
932 /* Now fetch the filename from target memory. */
933 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
934 make_cleanup (xfree, filename);
935
936 if (errcode)
937 {
938 warning (_("failed to read exec filename from attached file: %s"),
939 safe_strerror (errcode));
940 do_cleanups (cleanups);
941 return 0;
942 }
943
944 /* Have a pathname: read the symbol file. */
945 symbol_file_add_main (filename, from_tty);
946
947 do_cleanups (cleanups);
948 return 1;
949 }
950
951 /* Data exchange structure for the XML parser as returned by
952 svr4_current_sos_via_xfer_libraries. */
953
954 struct svr4_library_list
955 {
956 struct so_list *head, **tailp;
957
958 /* Inferior address of struct link_map used for the main executable. It is
959 zero if not known. */
960 CORE_ADDR main_lm;
961 };
962
963 /* Implementation for target_so_ops.free_so. */
964
965 static void
966 svr4_free_so (struct so_list *so)
967 {
968 xfree (so->lm_info);
969 }
970
971 /* Free so_list built so far (called via cleanup). */
972
973 static void
974 svr4_free_library_list (void *p_list)
975 {
976 struct so_list *list = *(struct so_list **) p_list;
977
978 while (list != NULL)
979 {
980 struct so_list *next = list->next;
981
982 free_so (list);
983 list = next;
984 }
985 }
986
987 #ifdef HAVE_LIBEXPAT
988
989 #include "xml-support.h"
990
991 /* Handle the start of a <library> element. Note: new elements are added
992 at the tail of the list, keeping the list in order. */
993
994 static void
995 library_list_start_library (struct gdb_xml_parser *parser,
996 const struct gdb_xml_element *element,
997 void *user_data, VEC(gdb_xml_value_s) *attributes)
998 {
999 struct svr4_library_list *list = user_data;
1000 const char *name = xml_find_attribute (attributes, "name")->value;
1001 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1002 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1003 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1004 struct so_list *new_elem;
1005
1006 new_elem = XZALLOC (struct so_list);
1007 new_elem->lm_info = XZALLOC (struct lm_info);
1008 new_elem->lm_info->lm_addr = *lmp;
1009 new_elem->lm_info->l_addr_inferior = *l_addrp;
1010 new_elem->lm_info->l_ld = *l_ldp;
1011
1012 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1013 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1014 strcpy (new_elem->so_original_name, new_elem->so_name);
1015
1016 *list->tailp = new_elem;
1017 list->tailp = &new_elem->next;
1018 }
1019
1020 /* Handle the start of a <library-list-svr4> element. */
1021
1022 static void
1023 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1024 const struct gdb_xml_element *element,
1025 void *user_data, VEC(gdb_xml_value_s) *attributes)
1026 {
1027 struct svr4_library_list *list = user_data;
1028 const char *version = xml_find_attribute (attributes, "version")->value;
1029 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1030
1031 if (strcmp (version, "1.0") != 0)
1032 gdb_xml_error (parser,
1033 _("SVR4 Library list has unsupported version \"%s\""),
1034 version);
1035
1036 if (main_lm)
1037 list->main_lm = *(ULONGEST *) main_lm->value;
1038 }
1039
1040 /* The allowed elements and attributes for an XML library list.
1041 The root element is a <library-list>. */
1042
1043 static const struct gdb_xml_attribute svr4_library_attributes[] =
1044 {
1045 { "name", GDB_XML_AF_NONE, NULL, NULL },
1046 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1047 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1048 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1049 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1050 };
1051
1052 static const struct gdb_xml_element svr4_library_list_children[] =
1053 {
1054 {
1055 "library", svr4_library_attributes, NULL,
1056 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1057 library_list_start_library, NULL
1058 },
1059 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1060 };
1061
1062 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1063 {
1064 { "version", GDB_XML_AF_NONE, NULL, NULL },
1065 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1066 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1067 };
1068
1069 static const struct gdb_xml_element svr4_library_list_elements[] =
1070 {
1071 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1072 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1073 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1074 };
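/* An illustrative (hypothetical) document matched by the elements above:

       <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
         <library name="/lib/x86_64-linux-gnu/libc.so.6"
                  lm="0x7ffff7ffe700" l_addr="0x7ffff7a1a000"
                  l_ld="0x7ffff7dd5b80"/>
       </library-list-svr4>  */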
1075
1076 /* Parse qXfer:libraries:read packet into *SO_LIST_RETURN.
1077
1078 Return 0 if the packet is not supported; *SO_LIST_RETURN is not modified in
1079 that case. Return 1 if *SO_LIST_RETURN contains the library list, which may
1080 be empty; the caller is responsible for freeing all its entries. */
1081
1082 static int
1083 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1084 {
1085 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1086 &list->head);
1087
1088 memset (list, 0, sizeof (*list));
1089 list->tailp = &list->head;
1090 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1091 svr4_library_list_elements, document, list) == 0)
1092 {
1093 /* Parsed successfully, keep the result. */
1094 discard_cleanups (back_to);
1095 return 1;
1096 }
1097
1098 do_cleanups (back_to);
1099 return 0;
1100 }
1101
1102 /* Attempt to get so_list from target via qXfer:libraries:read packet.
1103
1104 Return 0 if the packet is not supported; *SO_LIST_RETURN is not modified in
1105 that case. Return 1 if *SO_LIST_RETURN contains the library list, which may
1106 be empty; the caller is responsible for freeing all its entries. */
1107
1108 static int
1109 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1110 {
1111 char *svr4_library_document;
1112 int result;
1113 struct cleanup *back_to;
1114
1115 /* Fetch the list of shared libraries. */
1116 svr4_library_document = target_read_stralloc (&current_target,
1117 TARGET_OBJECT_LIBRARIES_SVR4,
1118 NULL);
1119 if (svr4_library_document == NULL)
1120 return 0;
1121
1122 back_to = make_cleanup (xfree, svr4_library_document);
1123 result = svr4_parse_libraries (svr4_library_document, list);
1124 do_cleanups (back_to);
1125
1126 return result;
1127 }
1128
1129 #else
1130
1131 static int
1132 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1133 {
1134 return 0;
1135 }
1136
1137 #endif
1138
1139 /* If no shared library information is available from the dynamic
1140 linker, build a fallback list from other sources. */
1141
1142 static struct so_list *
1143 svr4_default_sos (void)
1144 {
1145 struct svr4_info *info = get_svr4_info ();
1146 struct so_list *new;
1147
1148 if (!info->debug_loader_offset_p)
1149 return NULL;
1150
1151 new = XZALLOC (struct so_list);
1152
1153 new->lm_info = xzalloc (sizeof (struct lm_info));
1154
1155 /* Nothing will ever check the other fields if we set l_addr_p. */
1156 new->lm_info->l_addr = info->debug_loader_offset;
1157 new->lm_info->l_addr_p = 1;
1158
1159 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1160 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1161 strcpy (new->so_original_name, new->so_name);
1162
1163 return new;
1164 }
1165
1166 /* Read the whole inferior libraries chain starting at address LM. Add the
1167 entries to the tail referenced by LINK_PTR_PTR. Ignore the first entry if
1168 IGNORE_FIRST and set global MAIN_LM_ADDR according to it. */
1169
1170 static void
1171 svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
1172 int ignore_first)
1173 {
1174 CORE_ADDR prev_lm = 0, next_lm;
1175
1176 for (; lm != 0; prev_lm = lm, lm = next_lm)
1177 {
1178 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1179 struct so_list *new;
1180 struct cleanup *old_chain;
1181 int errcode;
1182 char *buffer;
1183
1184 new = XZALLOC (struct so_list);
1185 old_chain = make_cleanup_free_so (new);
1186
1187 new->lm_info = lm_info_read (lm);
1188 if (new->lm_info == NULL)
1189 {
1190 do_cleanups (old_chain);
1191 break;
1192 }
1193
1194 next_lm = new->lm_info->l_next;
1195
1196 if (new->lm_info->l_prev != prev_lm)
1197 {
1198 warning (_("Corrupted shared library list: %s != %s"),
1199 paddress (target_gdbarch (), prev_lm),
1200 paddress (target_gdbarch (), new->lm_info->l_prev));
1201 do_cleanups (old_chain);
1202 break;
1203 }
1204
1205 /* For SVR4 versions, the first entry in the link map is for the
1206 inferior executable, so we must ignore it. For some versions of
1207 SVR4, it has no name. For others (Solaris 2.3 for example), it
1208 does have a name, so we can no longer use a missing name to
1209 decide when to ignore it. */
1210 if (ignore_first && new->lm_info->l_prev == 0)
1211 {
1212 struct svr4_info *info = get_svr4_info ();
1213
1214 info->main_lm_addr = new->lm_info->lm_addr;
1215 do_cleanups (old_chain);
1216 continue;
1217 }
1218
1219 /* Extract this shared object's name. */
1220 target_read_string (new->lm_info->l_name, &buffer,
1221 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1222 if (errcode != 0)
1223 {
1224 warning (_("Can't read pathname for load map: %s."),
1225 safe_strerror (errcode));
1226 do_cleanups (old_chain);
1227 continue;
1228 }
1229
1230 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1231 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1232 strcpy (new->so_original_name, new->so_name);
1233 xfree (buffer);
1234
1235 /* If this entry has no name, or its name matches the name
1236 for the main executable, don't include it in the list. */
1237 if (! new->so_name[0] || match_main (new->so_name))
1238 {
1239 do_cleanups (old_chain);
1240 continue;
1241 }
1242
1243 discard_cleanups (old_chain);
1244 new->next = 0;
1245 **link_ptr_ptr = new;
1246 *link_ptr_ptr = &new->next;
1247 }
1248 }
1249
1250 /* Implement the "current_sos" target_so_ops method. */
1251
1252 static struct so_list *
1253 svr4_current_sos (void)
1254 {
1255 CORE_ADDR lm;
1256 struct so_list *head = NULL;
1257 struct so_list **link_ptr = &head;
1258 struct svr4_info *info;
1259 struct cleanup *back_to;
1260 int ignore_first;
1261 struct svr4_library_list library_list;
1262
1263 /* Fall back to manual examination of the target if the packet is not
1264 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1265 tests a case where gdbserver cannot find the shared libraries list while
1266 GDB itself is able to find it via SYMFILE_OBJFILE.
1267
1268 Unfortunately statically linked inferiors will also fall back through this
1269 suboptimal code path. */
1270
1271 if (svr4_current_sos_via_xfer_libraries (&library_list))
1272 {
1273 if (library_list.main_lm)
1274 {
1275 info = get_svr4_info ();
1276 info->main_lm_addr = library_list.main_lm;
1277 }
1278
1279 return library_list.head ? library_list.head : svr4_default_sos ();
1280 }
1281
1282 info = get_svr4_info ();
1283
1284 /* Always locate the debug struct, in case it has moved. */
1285 info->debug_base = 0;
1286 locate_base (info);
1287
1288 /* If we can't find the dynamic linker's base structure, this
1289 must not be a dynamically linked executable. Hmm. */
1290 if (! info->debug_base)
1291 return svr4_default_sos ();
1292
1293 /* Assume that everything is a library if the dynamic loader was loaded
1294 late by a static executable. */
1295 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1296 ignore_first = 0;
1297 else
1298 ignore_first = 1;
1299
1300 back_to = make_cleanup (svr4_free_library_list, &head);
1301
1302 /* Walk the inferior's link map list, and build our list of
1303 `struct so_list' nodes. */
1304 lm = solib_svr4_r_map (info);
1305 if (lm)
1306 svr4_read_so_list (lm, &link_ptr, ignore_first);
1307
1308 /* On Solaris, the dynamic linker is not in the normal list of
1309 shared objects, so make sure we pick it up too. Having
1310 symbol information for the dynamic linker is quite crucial
1311 for skipping dynamic linker resolver code. */
1312 lm = solib_svr4_r_ldsomap (info);
1313 if (lm)
1314 svr4_read_so_list (lm, &link_ptr, 0);
1315
1316 discard_cleanups (back_to);
1317
1318 if (head == NULL)
1319 return svr4_default_sos ();
1320
1321 return head;
1322 }
1323
1324 /* Get the address of the link_map for a given OBJFILE. */
1325
1326 CORE_ADDR
1327 svr4_fetch_objfile_link_map (struct objfile *objfile)
1328 {
1329 struct so_list *so;
1330 struct svr4_info *info = get_svr4_info ();
1331
1332 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1333 if (info->main_lm_addr == 0)
1334 solib_add (NULL, 0, &current_target, auto_solib_add);
1335
1336 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1337 if (objfile == symfile_objfile)
1338 return info->main_lm_addr;
1339
1340 /* The other link map addresses may be found by examining the list
1341 of shared libraries. */
1342 for (so = master_so_list (); so; so = so->next)
1343 if (so->objfile == objfile)
1344 return so->lm_info->lm_addr;
1345
1346 /* Not found! */
1347 return 0;
1348 }
1349
1350 /* On some systems, the only way to recognize the link map entry for
1351 the main executable file is by looking at its name. Return
1352 non-zero iff SONAME matches one of the known main executable names. */
1353
1354 static int
1355 match_main (const char *soname)
1356 {
1357 const char * const *mainp;
1358
1359 for (mainp = main_name_list; *mainp != NULL; mainp++)
1360 {
1361 if (strcmp (soname, *mainp) == 0)
1362 return (1);
1363 }
1364
1365 return (0);
1366 }
1367
1368 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1369 SVR4 run time loader. */
1370
1371 int
1372 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1373 {
1374 struct svr4_info *info = get_svr4_info ();
1375
1376 return ((pc >= info->interp_text_sect_low
1377 && pc < info->interp_text_sect_high)
1378 || (pc >= info->interp_plt_sect_low
1379 && pc < info->interp_plt_sect_high)
1380 || in_plt_section (pc, NULL)
1381 || in_gnu_ifunc_stub (pc));
1382 }
1383
1384 /* Given an executable's ABFD and target, compute the entry-point
1385 address. */
1386
1387 static CORE_ADDR
1388 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1389 {
1390 CORE_ADDR addr;
1391
1392 /* KevinB wrote ... for most targets, the address returned by
1393 bfd_get_start_address() is the entry point for the start
1394 function. But, for some targets, bfd_get_start_address() returns
1395 the address of a function descriptor from which the entry point
1396 address may be extracted. This address is extracted by
1397 gdbarch_convert_from_func_ptr_addr(). The method
1398 gdbarch_convert_from_func_ptr_addr() is merely the identity
1399 function for targets which don't use function descriptors. */
1400 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1401 bfd_get_start_address (abfd),
1402 targ);
1403 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1404 }
1405
1406 /* Helper function for gdb_bfd_lookup_symbol. */
1407
1408 static int
1409 cmp_name_and_sec_flags (asymbol *sym, void *data)
1410 {
1411 return (strcmp (sym->name, (const char *) data) == 0
1412 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1413 }
1414 /* Arrange for dynamic linker to hit breakpoint.
1415
1416 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1417 debugger interface, support for arranging for the inferior to hit
1418 a breakpoint after mapping in the shared libraries. This function
1419 enables that breakpoint.
1420
1421 For SunOS, there is a special flag location (in_debugger) which we
1422 set to 1. When the dynamic linker sees this flag set, it will set
1423 a breakpoint at a location known only to itself, after saving the
1424 original contents of that place and the breakpoint address itself,
1425 in its own internal structures. When we resume the inferior, it
1426 will eventually take a SIGTRAP when it runs into the breakpoint.
1427 We handle this (in a different place) by restoring the contents of
1428 the breakpointed location (which is only known after it stops),
1429 chasing around to locate the shared libraries that have been
1430 loaded, then resuming.
1431
1432 For SVR4, the debugger interface structure contains a member (r_brk)
1433 which is statically initialized at the time the shared library is
1434 built, to the offset of a function (_r_debug_state) which is guaran-
1435 teed to be called once before mapping in a library, and again when
1436 the mapping is complete. At the time we are examining this member,
1437 it contains only the unrelocated offset of the function, so we have
1438 to do our own relocation. Later, when the dynamic linker actually
1439 runs, it relocates r_brk to be the actual address of _r_debug_state().
1440
1441 The debugger interface structure also contains an enumeration which
1442 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1443 depending upon whether or not the library is being mapped or unmapped,
1444 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1445
1446 static int
1447 enable_break (struct svr4_info *info, int from_tty)
1448 {
1449 struct minimal_symbol *msymbol;
1450 const char * const *bkpt_namep;
1451 asection *interp_sect;
1452 gdb_byte *interp_name;
1453 CORE_ADDR sym_addr;
1454
1455 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1456 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1457
1458 /* If we already have a shared library list in the target, and
1459 r_debug contains r_brk, set the breakpoint there - this should
1460 mean r_brk has already been relocated. Assume the dynamic linker
1461 is the object containing r_brk. */
1462
1463 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1464 sym_addr = 0;
1465 if (info->debug_base && solib_svr4_r_map (info) != 0)
1466 sym_addr = solib_svr4_r_brk (info);
1467
1468 if (sym_addr != 0)
1469 {
1470 struct obj_section *os;
1471
1472 sym_addr = gdbarch_addr_bits_remove
1473 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1474 sym_addr,
1475 &current_target));
1476
1477 /* On at least some versions of Solaris there's a dynamic relocation
1478 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1479 we get control before the dynamic linker has self-relocated.
1480 Check if SYM_ADDR is in a known section, if it is assume we can
1481 trust its value. This is just a heuristic though, it could go away
1482 or be replaced if it's getting in the way.
1483
1484 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1485 however it's spelled in your particular system) is ARM or Thumb.
1486 That knowledge is encoded in the address, if it's Thumb the low bit
1487 is 1. However, we've stripped that info above and it's not clear
1488 what all the consequences are of passing a non-addr_bits_remove'd
1489 address to create_solib_event_breakpoint. The call to
1490 find_pc_section verifies we know about the address and have some
1491 hope of computing the right kind of breakpoint to use (via
1492 symbol info). It does mean that GDB needs to be pointed at a
1493 non-stripped version of the dynamic linker in order to obtain
1494 information it already knows about. Sigh. */
1495
1496 os = find_pc_section (sym_addr);
1497 if (os != NULL)
1498 {
1499 /* Record the relocated start and end address of the dynamic linker
1500 text and plt section for svr4_in_dynsym_resolve_code. */
1501 bfd *tmp_bfd;
1502 CORE_ADDR load_addr;
1503
1504 tmp_bfd = os->objfile->obfd;
1505 load_addr = ANOFFSET (os->objfile->section_offsets,
1506 SECT_OFF_TEXT (os->objfile));
1507
1508 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1509 if (interp_sect)
1510 {
1511 info->interp_text_sect_low =
1512 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1513 info->interp_text_sect_high =
1514 info->interp_text_sect_low
1515 + bfd_section_size (tmp_bfd, interp_sect);
1516 }
1517 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1518 if (interp_sect)
1519 {
1520 info->interp_plt_sect_low =
1521 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1522 info->interp_plt_sect_high =
1523 info->interp_plt_sect_low
1524 + bfd_section_size (tmp_bfd, interp_sect);
1525 }
1526
1527 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1528 return 1;
1529 }
1530 }
1531
1532 /* Find the program interpreter; if not found, warn the user and drop
1533 into the old breakpoint at symbol code. */
1534 interp_name = find_program_interpreter ();
1535 if (interp_name)
1536 {
1537 CORE_ADDR load_addr = 0;
1538 int load_addr_found = 0;
1539 int loader_found_in_list = 0;
1540 struct so_list *so;
1541 bfd *tmp_bfd = NULL;
1542 struct target_ops *tmp_bfd_target;
1543 volatile struct gdb_exception ex;
1544
1545 sym_addr = 0;
1546
1547 /* Now we need to figure out where the dynamic linker was
1548 loaded so that we can load its symbols and place a breakpoint
1549 in the dynamic linker itself.
1550
1551 This address is stored on the stack. However, I've been unable
1552 to find any magic formula to find it for Solaris (appears to
1553 be trivial on GNU/Linux). Therefore, we have to try an alternate
1554 mechanism to find the dynamic linker's base address. */
1555
1556 TRY_CATCH (ex, RETURN_MASK_ALL)
1557 {
1558 tmp_bfd = solib_bfd_open (interp_name);
1559 }
1560 if (tmp_bfd == NULL)
1561 goto bkpt_at_symbol;
1562
1563 /* Now convert the TMP_BFD into a target. That way target, as
1564 well as BFD operations can be used. */
1565 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1566 /* target_bfd_reopen acquired its own reference, so we can
1567 release ours now. */
1568 gdb_bfd_unref (tmp_bfd);
1569
1570 /* On a running target, we can get the dynamic linker's base
1571 address from the shared library table. */
1572 so = master_so_list ();
1573 while (so)
1574 {
1575 if (svr4_same_1 (interp_name, so->so_original_name))
1576 {
1577 load_addr_found = 1;
1578 loader_found_in_list = 1;
1579 load_addr = lm_addr_check (so, tmp_bfd);
1580 break;
1581 }
1582 so = so->next;
1583 }
1584
1585 /* If we were not able to find the base address of the loader
1586 from our so_list, then try using the AT_BASE auxiliary entry. */
1587 if (!load_addr_found)
1588 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1589 {
1590 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
1591
1592 /* Ensure LOAD_ADDR has the proper sign in its possible upper bits, so
1593 that `+ load_addr' wraps around within the CORE_ADDR width instead of
1594 creating invalid addresses like 0x101234567 for 32-bit inferiors on
1595 64-bit GDB. */
1596
1597 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1598 {
1599 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1600 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1601 tmp_bfd_target);
1602
1603 gdb_assert (load_addr < space_size);
1604
1605 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a prelinked
1606 64-bit ld.so with a 32-bit executable; it should not happen. */
1607
1608 if (tmp_entry_point < space_size
1609 && tmp_entry_point + load_addr >= space_size)
1610 load_addr -= space_size;
1611 }
1612
1613 load_addr_found = 1;
1614 }
1615
1616 /* Otherwise we find the dynamic linker's base address by examining
1617 the current pc (which should point at the entry point for the
1618 dynamic linker) and subtracting the offset of the entry point.
1619
1620 This is more fragile than the previous approaches, but is a good
1621 fallback method because it has actually been working well in
1622 most cases. */
1623 if (!load_addr_found)
1624 {
1625 struct regcache *regcache
1626 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
1627
1628 load_addr = (regcache_read_pc (regcache)
1629 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1630 }
1631
1632 if (!loader_found_in_list)
1633 {
1634 info->debug_loader_name = xstrdup (interp_name);
1635 info->debug_loader_offset_p = 1;
1636 info->debug_loader_offset = load_addr;
1637 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1638 }
1639
1640 /* Record the relocated start and end address of the dynamic linker
1641 text and plt section for svr4_in_dynsym_resolve_code. */
1642 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1643 if (interp_sect)
1644 {
1645 info->interp_text_sect_low =
1646 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1647 info->interp_text_sect_high =
1648 info->interp_text_sect_low
1649 + bfd_section_size (tmp_bfd, interp_sect);
1650 }
1651 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1652 if (interp_sect)
1653 {
1654 info->interp_plt_sect_low =
1655 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1656 info->interp_plt_sect_high =
1657 info->interp_plt_sect_low
1658 + bfd_section_size (tmp_bfd, interp_sect);
1659 }
1660
1661 /* Now try to set a breakpoint in the dynamic linker. */
1662 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1663 {
1664 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1665 (void *) *bkpt_namep);
1666 if (sym_addr != 0)
1667 break;
1668 }
1669
1670 if (sym_addr != 0)
1671 /* Convert 'sym_addr' from a function pointer to an address.
1672 Because we pass tmp_bfd_target instead of the current
1673 target, this will always produce an unrelocated value. */
1674 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1675 sym_addr,
1676 tmp_bfd_target);
1677
1678 /* We're done with both the temporary bfd and target. Closing
1679 the target closes the underlying bfd, because it holds the
1680 only remaining reference. */
1681 target_close (tmp_bfd_target, 0);
1682
1683 if (sym_addr != 0)
1684 {
1685 create_solib_event_breakpoint (target_gdbarch (), load_addr + sym_addr);
1686 xfree (interp_name);
1687 return 1;
1688 }
1689
1690 /* For whatever reason we couldn't set a breakpoint in the dynamic
1691 linker. Warn and drop into the old code. */
1692 bkpt_at_symbol:
1693 xfree (interp_name);
1694 warning (_("Unable to find dynamic linker breakpoint function.\n"
1695 "GDB will be unable to debug shared library initializers\n"
1696 "and track explicitly loaded dynamic code."));
1697 }
1698
1699 /* Scan through the lists of symbols, trying to look up the symbol and
1700 set a breakpoint there. Terminate loop when we/if we succeed. */
1701
1702 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1703 {
1704 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1705 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1706 {
1707 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1708 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1709 sym_addr,
1710 &current_target);
1711 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1712 return 1;
1713 }
1714 }
1715
1716 if (interp_name != NULL && !current_inferior ()->attach_flag)
1717 {
1718 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1719 {
1720 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1721 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1722 {
1723 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1724 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1725 sym_addr,
1726 &current_target);
1727 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1728 return 1;
1729 }
1730 }
1731 }
1732 return 0;
1733 }
1734
1735 /* Implement the "special_symbol_handling" target_so_ops method. */
1736
1737 static void
1738 svr4_special_symbol_handling (void)
1739 {
1740 /* Nothing to do. */
1741 }
1742
1743 /* Read the ELF program headers from ABFD. Return the contents and
1744 set *PHDRS_SIZE to the size of the program headers. */
1745
1746 static gdb_byte *
1747 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1748 {
1749 Elf_Internal_Ehdr *ehdr;
1750 gdb_byte *buf;
1751
1752 ehdr = elf_elfheader (abfd);
1753
1754 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1755 if (*phdrs_size == 0)
1756 return NULL;
1757
1758 buf = xmalloc (*phdrs_size);
1759 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1760 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1761 {
1762 xfree (buf);
1763 return NULL;
1764 }
1765
1766 return buf;
1767 }
1768
1769 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
1770 inferior's exec_bfd. Otherwise return 0.
1771
1772 We relocate all of the sections by the same amount. This
1773 behavior is mandated by recent editions of the System V ABI.
1774 According to the System V Application Binary Interface,
1775 Edition 4.1, page 5-5:
1776
1777 ... Though the system chooses virtual addresses for
1778 individual processes, it maintains the segments' relative
1779 positions. Because position-independent code uses relative
1780 addressing between segments, the difference between
1781 virtual addresses in memory must match the difference
1782 between virtual addresses in the file. The difference
1783 between the virtual address of any segment in memory and
1784 the corresponding virtual address in the file is thus a
1785 single constant value for any one executable or shared
1786 object in a given process. This difference is the base
1787 address. One use of the base address is to relocate the
1788 memory image of the program during dynamic linking.
1789
1790 The same language also appears in Edition 4.0 of the System V
1791 ABI and is left unspecified in some of the earlier editions.
1792
1793 Decide if the objfile needs to be relocated. As indicated above, we will
1794 only be here when execution is stopped. But during attachment the PC can be
1795 at an arbitrary address, so regcache_read_pc can be misleading (contrary to
1796 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
1797 section, regcache_read_pc would point into the interpreter, not the main executable.
1798
1799 So, to summarize, relocations are necessary when the start address obtained
1800 from the executable is different from the address in auxv AT_ENTRY entry.
1801
1802 [ The astute reader will note that we also test to make sure that
1803 the executable in question has the DYNAMIC flag set. It is my
1804 opinion that this test is unnecessary (undesirable even). It
1805 was added to avoid inadvertent relocation of an executable
1806 whose e_type member in the ELF header is not ET_DYN. There may
1807 be a time in the future when it is desirable to do relocations
1808 on other types of files as well in which case this condition
1809 should either be removed or modified to accommodate the new file
1810 type. - Kevin, Nov 2000. ] */
1811
1812 static int
1813 svr4_exec_displacement (CORE_ADDR *displacementp)
1814 {
1815 /* ENTRY_POINT may still be a function descriptor - it has not yet been
1816 passed through gdbarch_convert_from_func_ptr_addr. */
1817 CORE_ADDR entry_point, displacement;
1818
1819 if (exec_bfd == NULL)
1820 return 0;
1821
1822 /* Skip fixed-position executables; for ELF the DYNAMIC flag is set only for
1823 ET_DYN files, not ET_EXEC. Both shared libraries being executed themselves
1824 and PIE (Position Independent Executable) executables are ET_DYN. */
1825
1826 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1827 return 0;
1828
1829 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1830 return 0;
1831
1832 displacement = entry_point - bfd_get_start_address (exec_bfd);
1833
1834 /* Verify that the DISPLACEMENT candidate complies with the required page
1835 alignment. This check is cheaper than the program header comparison below. */
1836
1837 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1838 {
1839 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1840
1841 /* p_align of PT_LOAD segments does not specify any alignment but
1842 only congruency of addresses:
1843 p_offset % p_align == p_vaddr % p_align
1844 The kernel is free to load the executable with a lower alignment. */
1845
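/* For example, with a 4096-byte minimum page size, a candidate
   displacement of 0x1234 is rejected here while 0x2000 is accepted. */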
1846 if ((displacement & (elf->minpagesize - 1)) != 0)
1847 return 0;
1848 }
1849
1850 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1851 comparing their program headers. If the program headers in the auxiliary
1852 vector do not match the program headers in the executable, then we are
1853 looking at a different file than the one used by the kernel - for
1854 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1855
1856 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1857 {
1858 /* Be optimistic and clear OK only if GDB was able to verify the headers
1859 really do not match. */
1860 int phdrs_size, phdrs2_size, ok = 1;
1861 gdb_byte *buf, *buf2;
1862 int arch_size;
1863
1864 buf = read_program_header (-1, &phdrs_size, &arch_size);
1865 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1866 if (buf != NULL && buf2 != NULL)
1867 {
1868 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
1869
1870 /* We are dealing with three different addresses. EXEC_BFD
1871 represents the current addresses in the on-disk file. The target
1872 memory content may differ from EXEC_BFD, as the file may have been
1873 prelinked to a different address after the executable was loaded.
1874 Moreover, the address of placement in target memory can be
1875 different from what the program headers in target memory say -
1876 this is the goal of PIE.
1877
1878 The detected DISPLACEMENT covers both the offset of PIE placement and
1879 any new prelink performed after the program was started. Here we
1880 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1881 content offset, for verification purposes. */
1882
1883 if (phdrs_size != phdrs2_size
1884 || bfd_get_arch_size (exec_bfd) != arch_size)
1885 ok = 0;
1886 else if (arch_size == 32
1887 && phdrs_size >= sizeof (Elf32_External_Phdr)
1888 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1889 {
1890 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1891 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1892 CORE_ADDR displacement = 0;
1893 int i;
1894
1895 /* DISPLACEMENT could be found more easily by the difference of
1896 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1897 already have enough information to compute that displacement
1898 with what we've read. */
1899
1900 for (i = 0; i < ehdr2->e_phnum; i++)
1901 if (phdr2[i].p_type == PT_LOAD)
1902 {
1903 Elf32_External_Phdr *phdrp;
1904 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1905 CORE_ADDR vaddr, paddr;
1906 CORE_ADDR displacement_vaddr = 0;
1907 CORE_ADDR displacement_paddr = 0;
1908
1909 phdrp = &((Elf32_External_Phdr *) buf)[i];
1910 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1911 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1912
1913 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1914 byte_order);
1915 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1916
1917 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1918 byte_order);
1919 displacement_paddr = paddr - phdr2[i].p_paddr;
1920
1921 if (displacement_vaddr == displacement_paddr)
1922 displacement = displacement_vaddr;
1923
1924 break;
1925 }
1926
1927 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1928
1929 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1930 {
1931 Elf32_External_Phdr *phdrp;
1932 Elf32_External_Phdr *phdr2p;
1933 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1934 CORE_ADDR vaddr, paddr;
1935 asection *plt2_asect;
1936
1937 phdrp = &((Elf32_External_Phdr *) buf)[i];
1938 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1939 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1940 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1941
1942 /* PT_GNU_STACK is an exception in that it is never relocated by
1943 prelink, as its addresses are always zero. */
1944
1945 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1946 continue;
1947
1948 /* Check also other adjustment combinations - PR 11786. */
1949
1950 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1951 byte_order);
1952 vaddr -= displacement;
1953 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1954
1955 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1956 byte_order);
1957 paddr -= displacement;
1958 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1959
1960 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1961 continue;
1962
1963 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1964 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1965 if (plt2_asect)
1966 {
1967 int content2;
1968 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1969 CORE_ADDR filesz;
1970
1971 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1972 & SEC_HAS_CONTENTS) != 0;
1973
1974 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1975 byte_order);
1976
1977 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1978 FILESZ is from the in-memory image. */
1979 if (content2)
1980 filesz += bfd_get_section_size (plt2_asect);
1981 else
1982 filesz -= bfd_get_section_size (plt2_asect);
1983
1984 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1985 filesz);
1986
1987 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1988 continue;
1989 }
1990
1991 ok = 0;
1992 break;
1993 }
1994 }
1995 else if (arch_size == 64
1996 && phdrs_size >= sizeof (Elf64_External_Phdr)
1997 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1998 {
1999 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2000 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2001 CORE_ADDR displacement = 0;
2002 int i;
2003
2004 /* DISPLACEMENT could be found more easily by the difference of
2005 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2006 already have enough information to compute that displacement
2007 with what we've read. */
2008
2009 for (i = 0; i < ehdr2->e_phnum; i++)
2010 if (phdr2[i].p_type == PT_LOAD)
2011 {
2012 Elf64_External_Phdr *phdrp;
2013 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2014 CORE_ADDR vaddr, paddr;
2015 CORE_ADDR displacement_vaddr = 0;
2016 CORE_ADDR displacement_paddr = 0;
2017
2018 phdrp = &((Elf64_External_Phdr *) buf)[i];
2019 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2020 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2021
2022 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2023 byte_order);
2024 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2025
2026 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2027 byte_order);
2028 displacement_paddr = paddr - phdr2[i].p_paddr;
2029
2030 if (displacement_vaddr == displacement_paddr)
2031 displacement = displacement_vaddr;
2032
2033 break;
2034 }
2035
2036 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2037
2038 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2039 {
2040 Elf64_External_Phdr *phdrp;
2041 Elf64_External_Phdr *phdr2p;
2042 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2043 CORE_ADDR vaddr, paddr;
2044 asection *plt2_asect;
2045
2046 phdrp = &((Elf64_External_Phdr *) buf)[i];
2047 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2048 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2049 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2050
2051 /* PT_GNU_STACK is an exception in that it is never relocated by
2052 prelink, as its addresses are always zero. */
2053
2054 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2055 continue;
2056
2057 /* Check also other adjustment combinations - PR 11786. */
2058
2059 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2060 byte_order);
2061 vaddr -= displacement;
2062 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2063
2064 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2065 byte_order);
2066 paddr -= displacement;
2067 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2068
2069 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2070 continue;
2071
2072 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2073 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2074 if (plt2_asect)
2075 {
2076 int content2;
2077 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2078 CORE_ADDR filesz;
2079
2080 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2081 & SEC_HAS_CONTENTS) != 0;
2082
2083 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2084 byte_order);
2085
2086 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2087 FILESZ is from the in-memory image. */
2088 if (content2)
2089 filesz += bfd_get_section_size (plt2_asect);
2090 else
2091 filesz -= bfd_get_section_size (plt2_asect);
2092
2093 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2094 filesz);
2095
2096 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2097 continue;
2098 }
2099
2100 ok = 0;
2101 break;
2102 }
2103 }
2104 else
2105 ok = 0;
2106 }
2107
2108 xfree (buf);
2109 xfree (buf2);
2110
2111 if (!ok)
2112 return 0;
2113 }
2114
2115 if (info_verbose)
2116 {
2117 /* This may be printed repeatedly, as there is no easy way to check
2118 whether the executable's symbols/file have already been relocated
2119 by the displacement. */
2120
2121 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2122 "displacement %s for \"%s\".\n"),
2123 paddress (target_gdbarch (), displacement),
2124 bfd_get_filename (exec_bfd));
2125 }
2126
2127 *displacementp = displacement;
2128 return 1;
2129 }
2130
2131 /* Relocate the main executable. This function should be called upon
2132 stopping the inferior process at the entry point to the program.
2133 The entry point from BFD is compared to the AT_ENTRY value from the auxiliary
2134 vector, and if they differ, the main executable is relocated by the proper amount. */
2135
2136 static void
2137 svr4_relocate_main_executable (void)
2138 {
2139 CORE_ADDR displacement;
2140
2141 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2142 probably contains the offsets computed using the PIE displacement
2143 from the previous run, which of course are irrelevant for this run.
2144 So we need to determine the new PIE displacement and recompute the
2145 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2146 already contains pre-computed offsets.
2147
2148 If we cannot compute the PIE displacement, either:
2149
2150 - The executable is not PIE.
2151
2152 - SYMFILE_OBJFILE does not match the executable started in the target.
2153 This can happen for main executable symbols loaded at the host while
2154 `ld.so --ld-args main-executable' is loaded in the target.
2155
2156 Then we leave the section offsets untouched and use them as is for
2157 this run. Either:
2158
2159 - These section offsets were properly reset earlier, and thus
2160 already contain the correct values. This can happen for instance
2161 when reconnecting via the remote protocol to a target that supports
2162 the `qOffsets' packet.
2163
2164 - The section offsets were not reset earlier, and the best we can
2165 hope is that the old offsets are still applicable to the new run. */
2166
2167 if (! svr4_exec_displacement (&displacement))
2168 return;
2169
2170 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2171 addresses. */
2172
2173 if (symfile_objfile)
2174 {
2175 struct section_offsets *new_offsets;
2176 int i;
2177
2178 new_offsets = alloca (symfile_objfile->num_sections
2179 * sizeof (*new_offsets));
2180
2181 for (i = 0; i < symfile_objfile->num_sections; i++)
2182 new_offsets->offsets[i] = displacement;
2183
2184 objfile_relocate (symfile_objfile, new_offsets);
2185 }
2186 else if (exec_bfd)
2187 {
2188 asection *asect;
2189
2190 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2191 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2192 (bfd_section_vma (exec_bfd, asect)
2193 + displacement));
2194 }
2195 }
2196
2197 /* Implement the "create_inferior_hook" target_so_ops method.
2198
2199 For SVR4 executables, the first instruction executed is either the first
2200 instruction in the dynamic linker (for dynamically linked
2201 executables) or the instruction at "start" for statically linked
2202 executables. For dynamically linked executables, the system
2203 first exec's /lib/libc.so.N, which contains the dynamic linker,
2204 and starts it running. The dynamic linker maps in any needed
2205 shared libraries, maps in the actual user executable, and then
2206 jumps to "start" in the user executable.
2207
2208 We can arrange to cooperate with the dynamic linker to discover the
2209 names of shared libraries that are dynamically linked, and the base
2210 addresses to which they are linked.
2211
2212 This function is responsible for discovering those names and
2213 addresses, and saving sufficient information about them to allow
2214 their symbols to be read at a later time. */
2215
2216 static void
2217 svr4_solib_create_inferior_hook (int from_tty)
2218 {
2219 struct svr4_info *info;
2220
2221 info = get_svr4_info ();
2222
2223 /* Relocate the main executable if necessary. */
2224 svr4_relocate_main_executable ();
2225
2226 /* No point setting a breakpoint in the dynamic linker if we can't
2227 hit it (e.g., a core file, or a trace file). */
2228 if (!target_has_execution)
2229 return;
2230
2231 if (!svr4_have_link_map_offsets ())
2232 return;
2233
2234 if (!enable_break (info, from_tty))
2235 return;
2236 }
2237
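/* Implement the "clear_solib" target_so_ops method. Discard the dynamic
   linker state (debug base and loader name/offset) cached for the current
   program space. */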
2238 static void
2239 svr4_clear_solib (void)
2240 {
2241 struct svr4_info *info;
2242
2243 info = get_svr4_info ();
2244 info->debug_base = 0;
2245 info->debug_loader_offset_p = 0;
2246 info->debug_loader_offset = 0;
2247 xfree (info->debug_loader_name);
2248 info->debug_loader_name = NULL;
2249 }
2250
2251 /* Clear any bits of ADDR that wouldn't fit in a target-format
2252 data pointer. "Data pointer" here refers to whatever sort of
2253 address the dynamic linker uses to manage its sections. At the
2254 moment, we don't support shared libraries on any processors where
2255 code and data pointers are different sizes.
2256
2257 This isn't really the right solution. What we really need here is
2258 a way to do arithmetic on CORE_ADDR values that respects the
2259 natural pointer/address correspondence. (For example, on the MIPS,
2260 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2261 sign-extend the value. There, simply truncating the bits above
2262 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2263 be a new gdbarch method or something. */
2264 static CORE_ADDR
2265 svr4_truncate_ptr (CORE_ADDR addr)
2266 {
2267 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2268 /* We don't need to truncate anything, and the bit twiddling below
2269 will fail due to overflow problems. */
2270 return addr;
2271 else
2272 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2273 }
2274
2275
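/* Implement the "relocate_section_addresses" target_so_ops method. Shift
   SEC's start and end addresses by the load address of SO, truncating the
   results to the target's pointer width. */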
2276 static void
2277 svr4_relocate_section_addresses (struct so_list *so,
2278 struct target_section *sec)
2279 {
2280 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2281 sec->bfd));
2282 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2283 sec->bfd));
2284 }
2285 \f
2286
2287 /* Architecture-specific operations. */
2288
2289 /* Per-architecture data key. */
2290 static struct gdbarch_data *solib_svr4_data;
2291
2292 struct solib_svr4_ops
2293 {
2294 /* Return a description of the layout of `struct link_map'. */
2295 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2296 };
2297
2298 /* Return a default for the architecture-specific operations. */
2299
2300 static void *
2301 solib_svr4_init (struct obstack *obstack)
2302 {
2303 struct solib_svr4_ops *ops;
2304
2305 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2306 ops->fetch_link_map_offsets = NULL;
2307 return ops;
2308 }
2309
2310 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2311 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2312
2313 void
2314 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2315 struct link_map_offsets *(*flmo) (void))
2316 {
2317 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2318
2319 ops->fetch_link_map_offsets = flmo;
2320
2321 set_solib_ops (gdbarch, &svr4_so_ops);
2322 }
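
/* A typical caller is an architecture's gdbarch initialization routine,
   for instance (a sketch - the tdep function name is only illustrative):

       static void
       xyz_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
       {
         set_solib_svr4_fetch_link_map_offsets
           (gdbarch, svr4_ilp32_fetch_link_map_offsets);
       }
*/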
2323
2324 /* Fetch a link_map_offsets structure using the architecture-specific
2325 `struct link_map_offsets' fetcher. */
2326
2327 static struct link_map_offsets *
2328 svr4_fetch_link_map_offsets (void)
2329 {
2330 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2331
2332 gdb_assert (ops->fetch_link_map_offsets);
2333 return ops->fetch_link_map_offsets ();
2334 }
2335
2336 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2337
2338 static int
2339 svr4_have_link_map_offsets (void)
2340 {
2341 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2342
2343 return (ops->fetch_link_map_offsets != NULL);
2344 }
2345 \f
2346
2347 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2348 `struct r_debug' and a `struct link_map' that are binary compatible
2349 with the original SVR4 implementation. */
2350
2351 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2352 for an ILP32 SVR4 system. */
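
/* For reference, those offsets correspond to a `struct link_map' laid out
   like the common SVR4/glibc ILP32 declaration (a sketch only - the
   authoritative definition is the target's <link.h>):

       struct link_map
       {
         Elf32_Addr l_addr;
         char *l_name;
         Elf32_Dyn *l_ld;
         struct link_map *l_next;
         struct link_map *l_prev;
       };

   whose members then fall at offsets 0, 4, 8, 12 and 16, and to a
   `struct r_debug' whose r_version, r_map and r_brk members sit at
   offsets 0, 4 and 8. */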
2353
2354 struct link_map_offsets *
2355 svr4_ilp32_fetch_link_map_offsets (void)
2356 {
2357 static struct link_map_offsets lmo;
2358 static struct link_map_offsets *lmp = NULL;
2359
2360 if (lmp == NULL)
2361 {
2362 lmp = &lmo;
2363
2364 lmo.r_version_offset = 0;
2365 lmo.r_version_size = 4;
2366 lmo.r_map_offset = 4;
2367 lmo.r_brk_offset = 8;
2368 lmo.r_ldsomap_offset = 20;
2369
2370 /* Everything we need is in the first 20 bytes. */
2371 lmo.link_map_size = 20;
2372 lmo.l_addr_offset = 0;
2373 lmo.l_name_offset = 4;
2374 lmo.l_ld_offset = 8;
2375 lmo.l_next_offset = 12;
2376 lmo.l_prev_offset = 16;
2377 }
2378
2379 return lmp;
2380 }
2381
2382 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2383 for an LP64 SVR4 system. */
2384
2385 struct link_map_offsets *
2386 svr4_lp64_fetch_link_map_offsets (void)
2387 {
2388 static struct link_map_offsets lmo;
2389 static struct link_map_offsets *lmp = NULL;
2390
2391 if (lmp == NULL)
2392 {
2393 lmp = &lmo;
2394
2395 lmo.r_version_offset = 0;
2396 lmo.r_version_size = 4;
2397 lmo.r_map_offset = 8;
2398 lmo.r_brk_offset = 16;
2399 lmo.r_ldsomap_offset = 40;
2400
2401 /* Everything we need is in the first 40 bytes. */
2402 lmo.link_map_size = 40;
2403 lmo.l_addr_offset = 0;
2404 lmo.l_name_offset = 8;
2405 lmo.l_ld_offset = 16;
2406 lmo.l_next_offset = 24;
2407 lmo.l_prev_offset = 32;
2408 }
2409
2410 return lmp;
2411 }
2412 \f
2413
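/* The target_so_ops vector used for SVR4-style shared libraries. Its
   hooks are installed by _initialize_svr4_solib at the bottom of this
   file. */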
2414 struct target_so_ops svr4_so_ops;
2415
2416 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have
2417 a different rule for symbol lookup: the lookup begins here in the DSO, not in
2418 the main executable. */
2419
2420 static struct symbol *
2421 elf_lookup_lib_symbol (const struct objfile *objfile,
2422 const char *name,
2423 const domain_enum domain)
2424 {
2425 bfd *abfd;
2426
2427 if (objfile == symfile_objfile)
2428 abfd = exec_bfd;
2429 else
2430 {
2431 /* OBJFILE should have been passed as the non-debug one. */
2432 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2433
2434 abfd = objfile->obfd;
2435 }
2436
2437 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2438 return NULL;
2439
2440 return lookup_global_symbol_from_objfile (objfile, name, domain);
2441 }
2442
2443 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2444
2445 void
2446 _initialize_svr4_solib (void)
2447 {
2448 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2449 solib_svr4_pspace_data
2450 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
2451
2452 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2453 svr4_so_ops.free_so = svr4_free_so;
2454 svr4_so_ops.clear_solib = svr4_clear_solib;
2455 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2456 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2457 svr4_so_ops.current_sos = svr4_current_sos;
2458 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2459 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2460 svr4_so_ops.bfd_open = solib_bfd_open;
2461 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2462 svr4_so_ops.same = svr4_same;
2463 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2464 }