1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
46 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
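/* A worked example of the index mapping above: assuming the usual reloc
   numbering from elf/x86-64.h, where R_X86_64_GNU_VTINHERIT and
   R_X86_64_GNU_VTENTRY are the two values just below R_X86_64_max,
   subtracting R_X86_64_vt_offset maps them onto the two HOWTO entries
   that follow the R_X86_64_standard gap, while the x32 flavour of
   R_X86_64_32 is fetched from the very last slot of the table.  */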
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
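/* For example, when a non-PIC executable reads a variable defined in a
   shared library, the classic scheme reserves space for it in the
   executable's .dynbss and emits an R_X86_64_COPY reloc so the dynamic
   linker copies the initial value in at startup; with
   ELIMINATE_COPY_RELOCS the linker instead keeps the reference dynamic
   whenever the referencing reloc allows it, so no copy is needed.  */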
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
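/* A sketch of how lazy binding flows through these templates: the jmpq
   in each entry goes through the symbol's GOT slot, which initially
   points back at the pushq in the same entry (plt_lazy_offset below),
   so the first call falls through to push the entry's relocation index
   and jump to PLT0.  PLT0 in turn pushes GOT[1] (the link map) and
   jumps through GOT[2] (the dynamic linker's resolver), which patches
   the GOT slot so that later calls go straight to the target.  */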
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
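/* In the BND (MPX) layout each logical PLT entry is thus split in two:
   branches in the program resolve to an 8-byte entry like the one
   above, placed in the second PLT (the ".plt.bnd" section created in
   check_relocs below), which does a bnd-prefixed indirect jump through
   the GOT slot; that slot initially points at the matching 16-byte
   .plt entry, which pushes the relocation index and bnd-jumps to PLT0
   for lazy resolution, keeping the final branch bnd-prefixed so the
   MPX bound registers survive the PLT.  */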
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
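/* The DW_CFA_def_cfa_expression above encodes, roughly,
   CFA = %rsp + 8 + (((%rip & 15) >= 11) << 3): within each 16-byte PLT
   entry past PLT0 the pushq of the relocation index has executed once
   the PC is at least 11 bytes into the entry, so the CFA moves from
   %rsp + 8 to %rsp + 16 at that point and the unwinder can recover the
   caller from either half of the entry.  */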
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
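/* These offsets index straight into the templates: plt0_got1_offset and
   plt0_got2_offset (2 and 8) are the disp32 fields of the
   "pushq GOT+8(%rip)" and "jmpq *GOT+16(%rip)" instructions in
   elf_x86_64_plt0_entry, while plt_got_offset, plt_reloc_offset and
   plt_plt_offset (2, 7 and 12) are the disp32 of the jmpq, the
   immediate of the pushq and the rel32 of the final jmp in
   elf_x86_64_plt_entry.  plt0_got2_insn_end and plt_plt_insn_end give
   the offset just past the corresponding PC-relative instruction (the
   point the displacement is relative to), and plt_got_insn_size is the
   length of the instruction holding plt_got_offset.  */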
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
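/* tls_type behaves like a small bit mask: a symbol referenced through
   both R_X86_64_TLSGD and the TLSDESC relocations ends up with
   GOT_TLS_GD | GOT_TLS_GDESC, which is what GOT_TLS_GD_BOTH_P tests,
   while GOT_TLS_GD_P and GOT_TLS_GDESC_P accept either the pure or the
   combined form; tlsdesc_got above then records where that symbol's
   TLS descriptor was placed in the GOTPLT.  */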
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols. We reuse indx and dynstr_index for the local symbol
920 hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
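/* This gives local STT_GNU_IFUNC symbols a synthetic
   elf_link_hash_entry of their own, keyed by the owning bfd (via the
   id of its first section) and the symbol index, so that later code
   can treat them like global IFUNC symbols; the entries live in
   loc_hash_table and are allocated from loc_hash_memory, both released
   by elf_x86_64_link_hash_table_free below.  */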
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from GD access model. For 64bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transition to a different access model. For 32bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transition to a different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq $rbx, %rax
1254 call *%rax. */
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transition to a different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq $rbx, %rax
1297 call *%rax. */
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
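/* The checks below look at the bytes just before the relocation:
   an optional REX prefix (0x48 or 0x4c; possibly 0x44 or absent on
   x32), an opcode byte that must be 0x8b (mov) or 0x03 (add), and a
   ModRM byte whose (val & 0xc7) == 5 test selects mod=00, r/m=101,
   i.e. RIP-relative addressing with a 32-bit displacement, leaving
   the reg field (the destination register) unconstrained.  */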
1340
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
1365 val = bfd_get_8 (abfd, contents + offset - 1);
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
1409
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
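/* In summary: when producing an executable, general-dynamic, TLS
   descriptor and initial-exec accesses are relaxed, to local-exec
   (R_X86_64_TPOFF32) when the symbol turns out to be local to the
   link and to initial-exec (R_X86_64_GOTTPOFF) otherwise, and
   local-dynamic collapses to local-exec; when building a shared
   object the original access model is kept.  */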
1531
1532 /* Rename some of the generic section flags to better document how they
1533 are used here. */
1534 #define need_convert_mov_to_lea sec_flg0
1535
1536 /* Look through the relocs for a section during the first phase, and
1537 calculate needed space in the global offset table, procedure
1538 linkage table, and dynamic reloc sections. */
1539
1540 static bfd_boolean
1541 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1542 asection *sec,
1543 const Elf_Internal_Rela *relocs)
1544 {
1545 struct elf_x86_64_link_hash_table *htab;
1546 Elf_Internal_Shdr *symtab_hdr;
1547 struct elf_link_hash_entry **sym_hashes;
1548 const Elf_Internal_Rela *rel;
1549 const Elf_Internal_Rela *rel_end;
1550 asection *sreloc;
1551 bfd_boolean use_plt_got;
1552
1553 if (info->relocatable)
1554 return TRUE;
1555
1556 BFD_ASSERT (is_x86_64_elf (abfd));
1557
1558 htab = elf_x86_64_hash_table (info);
1559 if (htab == NULL)
1560 return FALSE;
1561
1562 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1563
1564 symtab_hdr = &elf_symtab_hdr (abfd);
1565 sym_hashes = elf_sym_hashes (abfd);
1566
1567 sreloc = NULL;
1568
1569 rel_end = relocs + sec->reloc_count;
1570 for (rel = relocs; rel < rel_end; rel++)
1571 {
1572 unsigned int r_type;
1573 unsigned long r_symndx;
1574 struct elf_link_hash_entry *h;
1575 Elf_Internal_Sym *isym;
1576 const char *name;
1577 bfd_boolean size_reloc;
1578
1579 r_symndx = htab->r_sym (rel->r_info);
1580 r_type = ELF32_R_TYPE (rel->r_info);
1581
1582 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1583 {
1584 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1585 abfd, r_symndx);
1586 return FALSE;
1587 }
1588
1589 if (r_symndx < symtab_hdr->sh_info)
1590 {
1591 /* A local symbol. */
1592 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1593 abfd, r_symndx);
1594 if (isym == NULL)
1595 return FALSE;
1596
1597 /* Check relocation against local STT_GNU_IFUNC symbol. */
1598 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1599 {
1600 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1601 TRUE);
1602 if (h == NULL)
1603 return FALSE;
1604
1605 /* Fake a STT_GNU_IFUNC symbol. */
1606 h->type = STT_GNU_IFUNC;
1607 h->def_regular = 1;
1608 h->ref_regular = 1;
1609 h->forced_local = 1;
1610 h->root.type = bfd_link_hash_defined;
1611 }
1612 else
1613 h = NULL;
1614 }
1615 else
1616 {
1617 isym = NULL;
1618 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1619 while (h->root.type == bfd_link_hash_indirect
1620 || h->root.type == bfd_link_hash_warning)
1621 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1622 }
1623
1624 /* Check invalid x32 relocations. */
1625 if (!ABI_64_P (abfd))
1626 switch (r_type)
1627 {
1628 default:
1629 break;
1630
1631 case R_X86_64_DTPOFF64:
1632 case R_X86_64_TPOFF64:
1633 case R_X86_64_PC64:
1634 case R_X86_64_GOTOFF64:
1635 case R_X86_64_GOT64:
1636 case R_X86_64_GOTPCREL64:
1637 case R_X86_64_GOTPC64:
1638 case R_X86_64_GOTPLT64:
1639 case R_X86_64_PLTOFF64:
1640 {
1641 if (h)
1642 name = h->root.root.string;
1643 else
1644 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1645 NULL);
1646 (*_bfd_error_handler)
1647 (_("%B: relocation %s against symbol `%s' isn't "
1648 "supported in x32 mode"), abfd,
1649 x86_64_elf_howto_table[r_type].name, name);
1650 bfd_set_error (bfd_error_bad_value);
1651 return FALSE;
1652 }
1653 break;
1654 }
1655
1656 if (h != NULL)
1657 {
1658 /* Create the ifunc sections for static executables. If we
1659 never see an indirect function symbol nor are we building
1660 a static executable, those sections will be empty and
1661 won't appear in output. */
1662 switch (r_type)
1663 {
1664 default:
1665 break;
1666
1667 case R_X86_64_PC32_BND:
1668 case R_X86_64_PLT32_BND:
1669 case R_X86_64_PC32:
1670 case R_X86_64_PLT32:
1671 case R_X86_64_32:
1672 case R_X86_64_64:
1673 /* MPX PLT is supported only if elf_x86_64_arch_bed
1674 is used in 64-bit mode. */
1675 if (ABI_64_P (abfd)
1676 && info->bndplt
1677 && (get_elf_x86_64_backend_data (abfd)
1678 == &elf_x86_64_arch_bed))
1679 {
1680 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1681
1682 /* Create the second PLT for Intel MPX support. */
1683 if (htab->plt_bnd == NULL)
1684 {
1685 unsigned int plt_bnd_align;
1686 const struct elf_backend_data *bed;
1687
1688 bed = get_elf_backend_data (info->output_bfd);
1689 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1690 && (sizeof (elf_x86_64_bnd_plt2_entry)
1691 == sizeof (elf_x86_64_legacy_plt2_entry)));
1692 plt_bnd_align = 3;
1693
1694 if (htab->elf.dynobj == NULL)
1695 htab->elf.dynobj = abfd;
1696 htab->plt_bnd
1697 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1698 ".plt.bnd",
1699 (bed->dynamic_sec_flags
1700 | SEC_ALLOC
1701 | SEC_CODE
1702 | SEC_LOAD
1703 | SEC_READONLY));
1704 if (htab->plt_bnd == NULL
1705 || !bfd_set_section_alignment (htab->elf.dynobj,
1706 htab->plt_bnd,
1707 plt_bnd_align))
1708 return FALSE;
1709 }
1710 }
1711
1712 case R_X86_64_32S:
1713 case R_X86_64_PC64:
1714 case R_X86_64_GOTPCREL:
1715 case R_X86_64_GOTPCREL64:
1716 if (htab->elf.dynobj == NULL)
1717 htab->elf.dynobj = abfd;
1718 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1719 return FALSE;
1720 break;
1721 }
1722
1723 /* It is referenced by a non-shared object. */
1724 h->ref_regular = 1;
1725 h->root.non_ir_ref = 1;
1726 }
1727
1728 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1729 symtab_hdr, sym_hashes,
1730 &r_type, GOT_UNKNOWN,
1731 rel, rel_end, h, r_symndx))
1732 return FALSE;
1733
1734 switch (r_type)
1735 {
1736 case R_X86_64_TLSLD:
1737 htab->tls_ld_got.refcount += 1;
1738 goto create_got;
1739
1740 case R_X86_64_TPOFF32:
1741 if (!info->executable && ABI_64_P (abfd))
1742 {
1743 if (h)
1744 name = h->root.root.string;
1745 else
1746 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1747 NULL);
1748 (*_bfd_error_handler)
1749 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1750 abfd,
1751 x86_64_elf_howto_table[r_type].name, name);
1752 bfd_set_error (bfd_error_bad_value);
1753 return FALSE;
1754 }
1755 break;
1756
1757 case R_X86_64_GOTTPOFF:
1758 if (!info->executable)
1759 info->flags |= DF_STATIC_TLS;
1760 /* Fall through */
1761
1762 case R_X86_64_GOT32:
1763 case R_X86_64_GOTPCREL:
1764 case R_X86_64_TLSGD:
1765 case R_X86_64_GOT64:
1766 case R_X86_64_GOTPCREL64:
1767 case R_X86_64_GOTPLT64:
1768 case R_X86_64_GOTPC32_TLSDESC:
1769 case R_X86_64_TLSDESC_CALL:
1770 /* This symbol requires a global offset table entry. */
1771 {
1772 int tls_type, old_tls_type;
1773
1774 switch (r_type)
1775 {
1776 default: tls_type = GOT_NORMAL; break;
1777 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1778 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1779 case R_X86_64_GOTPC32_TLSDESC:
1780 case R_X86_64_TLSDESC_CALL:
1781 tls_type = GOT_TLS_GDESC; break;
1782 }
1783
1784 if (h != NULL)
1785 {
1786 h->got.refcount += 1;
1787 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1788 }
1789 else
1790 {
1791 bfd_signed_vma *local_got_refcounts;
1792
1793 /* This is a global offset table entry for a local symbol. */
1794 local_got_refcounts = elf_local_got_refcounts (abfd);
1795 if (local_got_refcounts == NULL)
1796 {
1797 bfd_size_type size;
1798
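/* Allocate one block holding, for each local symbol, a GOT refcount
(bfd_signed_vma), a TLSDESC GOT offset (bfd_vma) and a TLS type byte;
the assignments below carve it into those three arrays. */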
1799 size = symtab_hdr->sh_info;
1800 size *= sizeof (bfd_signed_vma)
1801 + sizeof (bfd_vma) + sizeof (char);
1802 local_got_refcounts = ((bfd_signed_vma *)
1803 bfd_zalloc (abfd, size));
1804 if (local_got_refcounts == NULL)
1805 return FALSE;
1806 elf_local_got_refcounts (abfd) = local_got_refcounts;
1807 elf_x86_64_local_tlsdesc_gotent (abfd)
1808 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1809 elf_x86_64_local_got_tls_type (abfd)
1810 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1811 }
1812 local_got_refcounts[r_symndx] += 1;
1813 old_tls_type
1814 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1815 }
1816
1817 /* If a TLS symbol is accessed using IE at least once,
1818 there is no point in using a dynamic model for it. */
1819 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1820 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1821 || tls_type != GOT_TLS_IE))
1822 {
1823 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1824 tls_type = old_tls_type;
1825 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1826 && GOT_TLS_GD_ANY_P (tls_type))
1827 tls_type |= old_tls_type;
1828 else
1829 {
1830 if (h)
1831 name = h->root.root.string;
1832 else
1833 name = bfd_elf_sym_name (abfd, symtab_hdr,
1834 isym, NULL);
1835 (*_bfd_error_handler)
1836 (_("%B: '%s' accessed both as normal and thread local symbol"),
1837 abfd, name);
1838 bfd_set_error (bfd_error_bad_value);
1839 return FALSE;
1840 }
1841 }
1842
1843 if (old_tls_type != tls_type)
1844 {
1845 if (h != NULL)
1846 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1847 else
1848 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1849 }
1850 }
1851 /* Fall through */
1852
1853 case R_X86_64_GOTOFF64:
1854 case R_X86_64_GOTPC32:
1855 case R_X86_64_GOTPC64:
1856 create_got:
1857 if (htab->elf.sgot == NULL)
1858 {
1859 if (htab->elf.dynobj == NULL)
1860 htab->elf.dynobj = abfd;
1861 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1862 info))
1863 return FALSE;
1864 }
1865 break;
1866
1867 case R_X86_64_PLT32:
1868 case R_X86_64_PLT32_BND:
1869 /* This symbol requires a procedure linkage table entry. We
1870 actually build the entry in adjust_dynamic_symbol,
1871 because this might be a case of linking PIC code which is
1872 never referenced by a dynamic object, in which case we
1873 don't need to generate a procedure linkage table entry
1874 after all. */
1875
1876 /* If this is a local symbol, we resolve it directly without
1877 creating a procedure linkage table entry. */
1878 if (h == NULL)
1879 continue;
1880
1881 h->needs_plt = 1;
1882 h->plt.refcount += 1;
1883 break;
1884
1885 case R_X86_64_PLTOFF64:
1886 /* This tries to form the 'address' of a function relative
1887 to GOT. For global symbols we need a PLT entry. */
1888 if (h != NULL)
1889 {
1890 h->needs_plt = 1;
1891 h->plt.refcount += 1;
1892 }
1893 goto create_got;
1894
1895 case R_X86_64_SIZE32:
1896 case R_X86_64_SIZE64:
1897 size_reloc = TRUE;
1898 goto do_size;
1899
1900 case R_X86_64_32:
1901 if (!ABI_64_P (abfd))
1902 goto pointer;
1903 case R_X86_64_8:
1904 case R_X86_64_16:
1905 case R_X86_64_32S:
1906 /* Let's help debug shared library creation. These relocs
1907 cannot be used in shared libs. Don't error out for
1908 sections we don't care about, such as debug sections or
1909 non-constant sections. */
1910 if (info->shared
1911 && (sec->flags & SEC_ALLOC) != 0
1912 && (sec->flags & SEC_READONLY) != 0)
1913 {
1914 if (h)
1915 name = h->root.root.string;
1916 else
1917 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1918 (*_bfd_error_handler)
1919 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1920 abfd, x86_64_elf_howto_table[r_type].name, name);
1921 bfd_set_error (bfd_error_bad_value);
1922 return FALSE;
1923 }
1924 /* Fall through. */
1925
1926 case R_X86_64_PC8:
1927 case R_X86_64_PC16:
1928 case R_X86_64_PC32:
1929 case R_X86_64_PC32_BND:
1930 case R_X86_64_PC64:
1931 case R_X86_64_64:
1932 pointer:
1933 if (h != NULL && info->executable)
1934 {
1935 /* If this reloc is in a read-only section, we might
1936 need a copy reloc. We can't check reliably at this
1937 stage whether the section is read-only, as input
1938 sections have not yet been mapped to output sections.
1939 Tentatively set the flag for now, and correct in
1940 adjust_dynamic_symbol. */
1941 h->non_got_ref = 1;
1942
1943 /* We may need a .plt entry if the function this reloc
1944 refers to is in a shared lib. */
1945 h->plt.refcount += 1;
1946 if (r_type != R_X86_64_PC32
1947 && r_type != R_X86_64_PC32_BND
1948 && r_type != R_X86_64_PC64)
1949 h->pointer_equality_needed = 1;
1950 }
1951
1952 size_reloc = FALSE;
1953 do_size:
1954 /* If we are creating a shared library, and this is a reloc
1955 against a global symbol, or a non PC relative reloc
1956 against a local symbol, then we need to copy the reloc
1957 into the shared library. However, if we are linking with
1958 -Bsymbolic, we do not need to copy a reloc against a
1959 global symbol which is defined in an object we are
1960 including in the link (i.e., DEF_REGULAR is set). At
1961 this point we have not seen all the input files, so it is
1962 possible that DEF_REGULAR is not set now but will be set
1963 later (it is never cleared). In case of a weak definition,
1964 DEF_REGULAR may be cleared later by a strong definition in
1965 a shared library. We account for that possibility below by
1966 storing information in the dyn_relocs field of the hash
1967 table entry. A similar situation occurs when creating
1968 shared libraries and symbol visibility changes render the
1969 symbol local.
1970
1971 If on the other hand, we are creating an executable, we
1972 may need to keep relocations for symbols satisfied by a
1973 dynamic library if we manage to avoid copy relocs for the
1974 symbol. */
1975 if ((info->shared
1976 && (sec->flags & SEC_ALLOC) != 0
1977 && (! IS_X86_64_PCREL_TYPE (r_type)
1978 || (h != NULL
1979 && (! SYMBOLIC_BIND (info, h)
1980 || h->root.type == bfd_link_hash_defweak
1981 || !h->def_regular))))
1982 || (ELIMINATE_COPY_RELOCS
1983 && !info->shared
1984 && (sec->flags & SEC_ALLOC) != 0
1985 && h != NULL
1986 && (h->root.type == bfd_link_hash_defweak
1987 || !h->def_regular)))
1988 {
1989 struct elf_dyn_relocs *p;
1990 struct elf_dyn_relocs **head;
1991
1992 /* We must copy these reloc types into the output file.
1993 Create a reloc section in dynobj and make room for
1994 this reloc. */
1995 if (sreloc == NULL)
1996 {
1997 if (htab->elf.dynobj == NULL)
1998 htab->elf.dynobj = abfd;
1999
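/* The reloc section is aligned to 2**3 for the 64-bit ABI and 2**2 for x32. */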
2000 sreloc = _bfd_elf_make_dynamic_reloc_section
2001 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2002 abfd, /*rela?*/ TRUE);
2003
2004 if (sreloc == NULL)
2005 return FALSE;
2006 }
2007
2008 /* If this is a global symbol, we count the number of
2009 relocations we need for this symbol. */
2010 if (h != NULL)
2011 {
2012 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2013 }
2014 else
2015 {
2016 /* Track dynamic relocs needed for local syms too.
2017 We really need local syms available to do this
2018 easily. Oh well. */
2019 asection *s;
2020 void **vpp;
2021
2022 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2023 abfd, r_symndx);
2024 if (isym == NULL)
2025 return FALSE;
2026
2027 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2028 if (s == NULL)
2029 s = sec;
2030
2031 /* Beware of type punned pointers vs strict aliasing
2032 rules. */
2033 vpp = &(elf_section_data (s)->local_dynrel);
2034 head = (struct elf_dyn_relocs **)vpp;
2035 }
2036
2037 p = *head;
2038 if (p == NULL || p->sec != sec)
2039 {
2040 bfd_size_type amt = sizeof *p;
2041
2042 p = ((struct elf_dyn_relocs *)
2043 bfd_alloc (htab->elf.dynobj, amt));
2044 if (p == NULL)
2045 return FALSE;
2046 p->next = *head;
2047 *head = p;
2048 p->sec = sec;
2049 p->count = 0;
2050 p->pc_count = 0;
2051 }
2052
2053 p->count += 1;
2054 /* Count size relocation as PC-relative relocation. */
2055 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2056 p->pc_count += 1;
2057 }
2058 break;
2059
2060 /* This relocation describes the C++ object vtable hierarchy.
2061 Reconstruct it for later use during GC. */
2062 case R_X86_64_GNU_VTINHERIT:
2063 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2064 return FALSE;
2065 break;
2066
2067 /* This relocation describes which C++ vtable entries are actually
2068 used. Record for later use during GC. */
2069 case R_X86_64_GNU_VTENTRY:
2070 BFD_ASSERT (h != NULL);
2071 if (h != NULL
2072 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2073 return FALSE;
2074 break;
2075
2076 default:
2077 break;
2078 }
2079
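/* A symbol that is called through the PLT and also has a GOT entry (or is
bound at load time with -z now and doesn't need pointer equality) can use
a GOT-based PLT entry instead, so create .plt.got the first time this
situation is seen. */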
2080 if (use_plt_got
2081 && h != NULL
2082 && h->plt.refcount > 0
2083 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2084 || h->got.refcount > 0)
2085 && htab->plt_got == NULL)
2086 {
2087 /* Create the GOT procedure linkage table. */
2088 unsigned int plt_got_align;
2089 const struct elf_backend_data *bed;
2090
2091 bed = get_elf_backend_data (info->output_bfd);
2092 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2093 && (sizeof (elf_x86_64_bnd_plt2_entry)
2094 == sizeof (elf_x86_64_legacy_plt2_entry)));
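/* .plt.got entries are 8 bytes each, so align the section to 2**3. */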
2095 plt_got_align = 3;
2096
2097 if (htab->elf.dynobj == NULL)
2098 htab->elf.dynobj = abfd;
2099 htab->plt_got
2100 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2101 ".plt.got",
2102 (bed->dynamic_sec_flags
2103 | SEC_ALLOC
2104 | SEC_CODE
2105 | SEC_LOAD
2106 | SEC_READONLY));
2107 if (htab->plt_got == NULL
2108 || !bfd_set_section_alignment (htab->elf.dynobj,
2109 htab->plt_got,
2110 plt_got_align))
2111 return FALSE;
2112 }
2113
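/* Remember that this section has R_X86_64_GOTPCREL relocations which
elf_x86_64_convert_mov_to_lea may later relax from mov to lea;
STT_GNU_IFUNC symbols must keep their GOT entries. */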
2114 if (r_type == R_X86_64_GOTPCREL
2115 && (h == NULL || h->type != STT_GNU_IFUNC))
2116 sec->need_convert_mov_to_lea = 1;
2117 }
2118
2119 return TRUE;
2120 }
2121
2122 /* Return the section that should be marked against GC for a given
2123 relocation. */
2124
2125 static asection *
2126 elf_x86_64_gc_mark_hook (asection *sec,
2127 struct bfd_link_info *info,
2128 Elf_Internal_Rela *rel,
2129 struct elf_link_hash_entry *h,
2130 Elf_Internal_Sym *sym)
2131 {
2132 if (h != NULL)
2133 switch (ELF32_R_TYPE (rel->r_info))
2134 {
2135 case R_X86_64_GNU_VTINHERIT:
2136 case R_X86_64_GNU_VTENTRY:
2137 return NULL;
2138 }
2139
2140 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2141 }
2142
2143 /* Update the got entry reference counts for the section being removed. */
2144
2145 static bfd_boolean
2146 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2147 asection *sec,
2148 const Elf_Internal_Rela *relocs)
2149 {
2150 struct elf_x86_64_link_hash_table *htab;
2151 Elf_Internal_Shdr *symtab_hdr;
2152 struct elf_link_hash_entry **sym_hashes;
2153 bfd_signed_vma *local_got_refcounts;
2154 const Elf_Internal_Rela *rel, *relend;
2155
2156 if (info->relocatable)
2157 return TRUE;
2158
2159 htab = elf_x86_64_hash_table (info);
2160 if (htab == NULL)
2161 return FALSE;
2162
2163 elf_section_data (sec)->local_dynrel = NULL;
2164
2165 symtab_hdr = &elf_symtab_hdr (abfd);
2166 sym_hashes = elf_sym_hashes (abfd);
2167 local_got_refcounts = elf_local_got_refcounts (abfd);
2168
2169 htab = elf_x86_64_hash_table (info);
2170 relend = relocs + sec->reloc_count;
2171 for (rel = relocs; rel < relend; rel++)
2172 {
2173 unsigned long r_symndx;
2174 unsigned int r_type;
2175 struct elf_link_hash_entry *h = NULL;
2176
2177 r_symndx = htab->r_sym (rel->r_info);
2178 if (r_symndx >= symtab_hdr->sh_info)
2179 {
2180 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2181 while (h->root.type == bfd_link_hash_indirect
2182 || h->root.type == bfd_link_hash_warning)
2183 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2184 }
2185 else
2186 {
2187 /* A local symbol. */
2188 Elf_Internal_Sym *isym;
2189
2190 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2191 abfd, r_symndx);
2192
2193 /* Check relocation against local STT_GNU_IFUNC symbol. */
2194 if (isym != NULL
2195 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2196 {
2197 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2198 if (h == NULL)
2199 abort ();
2200 }
2201 }
2202
2203 if (h)
2204 {
2205 struct elf_x86_64_link_hash_entry *eh;
2206 struct elf_dyn_relocs **pp;
2207 struct elf_dyn_relocs *p;
2208
2209 eh = (struct elf_x86_64_link_hash_entry *) h;
2210
2211 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2212 if (p->sec == sec)
2213 {
2214 /* Everything must go for SEC. */
2215 *pp = p->next;
2216 break;
2217 }
2218 }
2219
2220 r_type = ELF32_R_TYPE (rel->r_info);
2221 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2222 symtab_hdr, sym_hashes,
2223 &r_type, GOT_UNKNOWN,
2224 rel, relend, h, r_symndx))
2225 return FALSE;
2226
2227 switch (r_type)
2228 {
2229 case R_X86_64_TLSLD:
2230 if (htab->tls_ld_got.refcount > 0)
2231 htab->tls_ld_got.refcount -= 1;
2232 break;
2233
2234 case R_X86_64_TLSGD:
2235 case R_X86_64_GOTPC32_TLSDESC:
2236 case R_X86_64_TLSDESC_CALL:
2237 case R_X86_64_GOTTPOFF:
2238 case R_X86_64_GOT32:
2239 case R_X86_64_GOTPCREL:
2240 case R_X86_64_GOT64:
2241 case R_X86_64_GOTPCREL64:
2242 case R_X86_64_GOTPLT64:
2243 if (h != NULL)
2244 {
2245 if (h->got.refcount > 0)
2246 h->got.refcount -= 1;
2247 if (h->type == STT_GNU_IFUNC)
2248 {
2249 if (h->plt.refcount > 0)
2250 h->plt.refcount -= 1;
2251 }
2252 }
2253 else if (local_got_refcounts != NULL)
2254 {
2255 if (local_got_refcounts[r_symndx] > 0)
2256 local_got_refcounts[r_symndx] -= 1;
2257 }
2258 break;
2259
2260 case R_X86_64_8:
2261 case R_X86_64_16:
2262 case R_X86_64_32:
2263 case R_X86_64_64:
2264 case R_X86_64_32S:
2265 case R_X86_64_PC8:
2266 case R_X86_64_PC16:
2267 case R_X86_64_PC32:
2268 case R_X86_64_PC32_BND:
2269 case R_X86_64_PC64:
2270 case R_X86_64_SIZE32:
2271 case R_X86_64_SIZE64:
2272 if (info->shared
2273 && (h == NULL || h->type != STT_GNU_IFUNC))
2274 break;
2275 /* Fall thru */
2276
2277 case R_X86_64_PLT32:
2278 case R_X86_64_PLT32_BND:
2279 case R_X86_64_PLTOFF64:
2280 if (h != NULL)
2281 {
2282 if (h->plt.refcount > 0)
2283 h->plt.refcount -= 1;
2284 }
2285 break;
2286
2287 default:
2288 break;
2289 }
2290 }
2291
2292 return TRUE;
2293 }
2294
2295 /* Adjust a symbol defined by a dynamic object and referenced by a
2296 regular object. The current definition is in some section of the
2297 dynamic object, but we're not including those sections. We have to
2298 change the definition to something the rest of the link can
2299 understand. */
2300
2301 static bfd_boolean
2302 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2303 struct elf_link_hash_entry *h)
2304 {
2305 struct elf_x86_64_link_hash_table *htab;
2306 asection *s;
2307 struct elf_x86_64_link_hash_entry *eh;
2308 struct elf_dyn_relocs *p;
2309
2310 /* STT_GNU_IFUNC symbol must go through PLT. */
2311 if (h->type == STT_GNU_IFUNC)
2312 {
2313 /* All local STT_GNU_IFUNC references must be treated as local
2314 calls via the local PLT. */
2315 if (h->ref_regular
2316 && SYMBOL_CALLS_LOCAL (info, h))
2317 {
2318 bfd_size_type pc_count = 0, count = 0;
2319 struct elf_dyn_relocs **pp;
2320
2321 eh = (struct elf_x86_64_link_hash_entry *) h;
2322 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2323 {
2324 pc_count += p->pc_count;
2325 p->count -= p->pc_count;
2326 p->pc_count = 0;
2327 count += p->count;
2328 if (p->count == 0)
2329 *pp = p->next;
2330 else
2331 pp = &p->next;
2332 }
2333
2334 if (pc_count || count)
2335 {
2336 h->needs_plt = 1;
2337 h->non_got_ref = 1;
2338 if (h->plt.refcount <= 0)
2339 h->plt.refcount = 1;
2340 else
2341 h->plt.refcount += 1;
2342 }
2343 }
2344
2345 if (h->plt.refcount <= 0)
2346 {
2347 h->plt.offset = (bfd_vma) -1;
2348 h->needs_plt = 0;
2349 }
2350 return TRUE;
2351 }
2352
2353 /* If this is a function, put it in the procedure linkage table. We
2354 will fill in the contents of the procedure linkage table later,
2355 when we know the address of the .got section. */
2356 if (h->type == STT_FUNC
2357 || h->needs_plt)
2358 {
2359 if (h->plt.refcount <= 0
2360 || SYMBOL_CALLS_LOCAL (info, h)
2361 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2362 && h->root.type == bfd_link_hash_undefweak))
2363 {
2364 /* This case can occur if we saw a PLT32 reloc in an input
2365 file, but the symbol was never referred to by a dynamic
2366 object, or if all references were garbage collected. In
2367 such a case, we don't actually need to build a procedure
2368 linkage table, and we can just do a PC32 reloc instead. */
2369 h->plt.offset = (bfd_vma) -1;
2370 h->needs_plt = 0;
2371 }
2372
2373 return TRUE;
2374 }
2375 else
2376 /* It's possible that we incorrectly decided a .plt reloc was
2377 needed for an R_X86_64_PC32 reloc to a non-function sym in
2378 check_relocs. We can't decide accurately between function and
2379 non-function syms in check_relocs; objects loaded later in
2380 the link may change h->type. So fix it now. */
2381 h->plt.offset = (bfd_vma) -1;
2382
2383 /* If this is a weak symbol, and there is a real definition, the
2384 processor independent code will have arranged for us to see the
2385 real definition first, and we can just use the same value. */
2386 if (h->u.weakdef != NULL)
2387 {
2388 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2389 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2390 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2391 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2392 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2393 {
2394 eh = (struct elf_x86_64_link_hash_entry *) h;
2395 h->non_got_ref = h->u.weakdef->non_got_ref;
2396 eh->needs_copy = h->u.weakdef->needs_copy;
2397 }
2398 return TRUE;
2399 }
2400
2401 /* This is a reference to a symbol defined by a dynamic object which
2402 is not a function. */
2403
2404 /* If we are creating a shared library, we must presume that the
2405 only references to the symbol are via the global offset table.
2406 For such cases we need not do anything here; the relocations will
2407 be handled correctly by relocate_section. */
2408 if (!info->executable)
2409 return TRUE;
2410
2411 /* If there are no references to this symbol that do not use the
2412 GOT, we don't need to generate a copy reloc. */
2413 if (!h->non_got_ref)
2414 return TRUE;
2415
2416 /* If -z nocopyreloc was given, we won't generate them either. */
2417 if (info->nocopyreloc)
2418 {
2419 h->non_got_ref = 0;
2420 return TRUE;
2421 }
2422
2423 if (ELIMINATE_COPY_RELOCS)
2424 {
2425 eh = (struct elf_x86_64_link_hash_entry *) h;
2426 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2427 {
2428 s = p->sec->output_section;
2429 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2430 break;
2431 }
2432
2433 /* If we didn't find any dynamic relocs in read-only sections, then
2434 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2435 if (p == NULL)
2436 {
2437 h->non_got_ref = 0;
2438 return TRUE;
2439 }
2440 }
2441
2442 /* We must allocate the symbol in our .dynbss section, which will
2443 become part of the .bss section of the executable. There will be
2444 an entry for this symbol in the .dynsym section. The dynamic
2445 object will contain position independent code, so all references
2446 from the dynamic object to this symbol will go through the global
2447 offset table. The dynamic linker will use the .dynsym entry to
2448 determine the address it must put in the global offset table, so
2449 both the dynamic object and the regular object will refer to the
2450 same memory location for the variable. */
2451
2452 htab = elf_x86_64_hash_table (info);
2453 if (htab == NULL)
2454 return FALSE;
2455
2456 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2457 to copy the initial value out of the dynamic object and into the
2458 runtime process image. */
2459 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2460 {
2461 const struct elf_backend_data *bed;
2462 bed = get_elf_backend_data (info->output_bfd);
2463 htab->srelbss->size += bed->s->sizeof_rela;
2464 h->needs_copy = 1;
2465 }
2466
2467 s = htab->sdynbss;
2468
2469 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2470 }
2471
2472 /* Allocate space in .plt, .got and associated reloc sections for
2473 dynamic relocs. */
2474
2475 static bfd_boolean
2476 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2477 {
2478 struct bfd_link_info *info;
2479 struct elf_x86_64_link_hash_table *htab;
2480 struct elf_x86_64_link_hash_entry *eh;
2481 struct elf_dyn_relocs *p;
2482 const struct elf_backend_data *bed;
2483 unsigned int plt_entry_size;
2484
2485 if (h->root.type == bfd_link_hash_indirect)
2486 return TRUE;
2487
2488 eh = (struct elf_x86_64_link_hash_entry *) h;
2489
2490 info = (struct bfd_link_info *) inf;
2491 htab = elf_x86_64_hash_table (info);
2492 if (htab == NULL)
2493 return FALSE;
2494 bed = get_elf_backend_data (info->output_bfd);
2495 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2496
2497 /* We can't use the GOT PLT if pointer equality is needed, since
2498 finish_dynamic_symbol won't clear the symbol value and the dynamic
2499 linker won't update the GOT slot; we would get into an infinite
2500 loop at run-time. */
2501 if (htab->plt_got != NULL
2502 && h->type != STT_GNU_IFUNC
2503 && !h->pointer_equality_needed
2504 && h->plt.refcount > 0
2505 && h->got.refcount > 0)
2506 {
2507 /* Don't use the regular PLT if there are both GOT and GOTPLT
2508 relocations. */
2509 h->plt.offset = (bfd_vma) -1;
2510
2511 /* Use the GOT PLT. */
2512 eh->plt_got.refcount = 1;
2513 }
2514
2515 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2516 here if it is defined and referenced in a non-shared object. */
2517 if (h->type == STT_GNU_IFUNC
2518 && h->def_regular)
2519 {
2520 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2521 &eh->dyn_relocs,
2522 plt_entry_size,
2523 plt_entry_size,
2524 GOT_ENTRY_SIZE))
2525 {
2526 asection *s = htab->plt_bnd;
2527 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2528 {
2529 /* Use the .plt.bnd section if it is created. */
2530 eh->plt_bnd.offset = s->size;
2531
2532 /* Make room for this entry in the .plt.bnd section. */
2533 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2534 }
2535
2536 return TRUE;
2537 }
2538 else
2539 return FALSE;
2540 }
2541 else if (htab->elf.dynamic_sections_created
2542 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2543 {
2544 bfd_boolean use_plt_got;
2545
2546 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2547 {
2548 /* Don't use the regular PLT for DF_BIND_NOW. */
2549 h->plt.offset = (bfd_vma) -1;
2550
2551 /* Use the GOT PLT. */
2552 h->got.refcount = 1;
2553 eh->plt_got.refcount = 1;
2554 }
2555
2556 use_plt_got = eh->plt_got.refcount > 0;
2557
2558 /* Make sure this symbol is output as a dynamic symbol.
2559 Undefined weak syms won't yet be marked as dynamic. */
2560 if (h->dynindx == -1
2561 && !h->forced_local)
2562 {
2563 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2564 return FALSE;
2565 }
2566
2567 if (info->shared
2568 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2569 {
2570 asection *s = htab->elf.splt;
2571 asection *bnd_s = htab->plt_bnd;
2572 asection *got_s = htab->plt_got;
2573
2574 /* If this is the first .plt entry, make room for the special
2575 first entry. The .plt section is used by prelink to undo
2576 prelinking for dynamic relocations. */
2577 if (s->size == 0)
2578 s->size = plt_entry_size;
2579
2580 if (use_plt_got)
2581 eh->plt_got.offset = got_s->size;
2582 else
2583 {
2584 h->plt.offset = s->size;
2585 if (bnd_s)
2586 eh->plt_bnd.offset = bnd_s->size;
2587 }
2588
2589 /* If this symbol is not defined in a regular file, and we are
2590 not generating a shared library, then set the symbol to this
2591 location in the .plt. This is required to make function
2592 pointers compare as equal between the normal executable and
2593 the shared library. */
2594 if (! info->shared
2595 && !h->def_regular)
2596 {
2597 if (use_plt_got)
2598 {
2599 /* We need to make a call to the entry of the GOT PLT
2600 instead of regular PLT entry. */
2601 h->root.u.def.section = got_s;
2602 h->root.u.def.value = eh->plt_got.offset;
2603 }
2604 else
2605 {
2606 if (bnd_s)
2607 {
2608 /* We need to make a call to the entry of the second
2609 PLT instead of regular PLT entry. */
2610 h->root.u.def.section = bnd_s;
2611 h->root.u.def.value = eh->plt_bnd.offset;
2612 }
2613 else
2614 {
2615 h->root.u.def.section = s;
2616 h->root.u.def.value = h->plt.offset;
2617 }
2618 }
2619 }
2620
2621 /* Make room for this entry. */
2622 if (use_plt_got)
2623 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2624 else
2625 {
2626 s->size += plt_entry_size;
2627 if (bnd_s)
2628 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2629
2630 /* We also need to make an entry in the .got.plt section,
2631 which will be placed in the .got section by the linker
2632 script. */
2633 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2634
2635 /* We also need to make an entry in the .rela.plt
2636 section. */
2637 htab->elf.srelplt->size += bed->s->sizeof_rela;
2638 htab->elf.srelplt->reloc_count++;
2639 }
2640 }
2641 else
2642 {
2643 h->plt.offset = (bfd_vma) -1;
2644 h->needs_plt = 0;
2645 }
2646 }
2647 else
2648 {
2649 h->plt.offset = (bfd_vma) -1;
2650 h->needs_plt = 0;
2651 }
2652
2653 eh->tlsdesc_got = (bfd_vma) -1;
2654
2655 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2656 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2657 if (h->got.refcount > 0
2658 && info->executable
2659 && h->dynindx == -1
2660 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2661 {
2662 h->got.offset = (bfd_vma) -1;
2663 }
2664 else if (h->got.refcount > 0)
2665 {
2666 asection *s;
2667 bfd_boolean dyn;
2668 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2669
2670 /* Make sure this symbol is output as a dynamic symbol.
2671 Undefined weak syms won't yet be marked as dynamic. */
2672 if (h->dynindx == -1
2673 && !h->forced_local)
2674 {
2675 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2676 return FALSE;
2677 }
2678
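/* A TLS descriptor takes two GOT entries, reserved in .got.plt; setting
got.offset to -2 records that only the TLSDESC slot has been allocated
so far. */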
2679 if (GOT_TLS_GDESC_P (tls_type))
2680 {
2681 eh->tlsdesc_got = htab->elf.sgotplt->size
2682 - elf_x86_64_compute_jump_table_size (htab);
2683 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2684 h->got.offset = (bfd_vma) -2;
2685 }
2686 if (! GOT_TLS_GDESC_P (tls_type)
2687 || GOT_TLS_GD_P (tls_type))
2688 {
2689 s = htab->elf.sgot;
2690 h->got.offset = s->size;
2691 s->size += GOT_ENTRY_SIZE;
2692 if (GOT_TLS_GD_P (tls_type))
2693 s->size += GOT_ENTRY_SIZE;
2694 }
2695 dyn = htab->elf.dynamic_sections_created;
2696 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2697 and two if global.
2698 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2699 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2700 || tls_type == GOT_TLS_IE)
2701 htab->elf.srelgot->size += bed->s->sizeof_rela;
2702 else if (GOT_TLS_GD_P (tls_type))
2703 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2704 else if (! GOT_TLS_GDESC_P (tls_type)
2705 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2706 || h->root.type != bfd_link_hash_undefweak)
2707 && (info->shared
2708 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2709 htab->elf.srelgot->size += bed->s->sizeof_rela;
2710 if (GOT_TLS_GDESC_P (tls_type))
2711 {
2712 htab->elf.srelplt->size += bed->s->sizeof_rela;
2713 htab->tlsdesc_plt = (bfd_vma) -1;
2714 }
2715 }
2716 else
2717 h->got.offset = (bfd_vma) -1;
2718
2719 if (eh->dyn_relocs == NULL)
2720 return TRUE;
2721
2722 /* In the shared -Bsymbolic case, discard space allocated for
2723 dynamic pc-relative relocs against symbols which turn out to be
2724 defined in regular objects. For the normal shared case, discard
2725 space for pc-relative relocs that have become local due to symbol
2726 visibility changes. */
2727
2728 if (info->shared)
2729 {
2730 /* Relocs that use pc_count are those that appear on a call
2731 insn, or certain REL relocs that can be generated via assembly.
2732 We want calls to protected symbols to resolve directly to the
2733 function rather than going via the plt. If people want
2734 function pointer comparisons to work as expected then they
2735 should avoid writing weird assembly. */
2736 if (SYMBOL_CALLS_LOCAL (info, h))
2737 {
2738 struct elf_dyn_relocs **pp;
2739
2740 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2741 {
2742 p->count -= p->pc_count;
2743 p->pc_count = 0;
2744 if (p->count == 0)
2745 *pp = p->next;
2746 else
2747 pp = &p->next;
2748 }
2749 }
2750
2751 /* Also discard relocs on undefined weak syms with non-default
2752 visibility. */
2753 if (eh->dyn_relocs != NULL)
2754 {
2755 if (h->root.type == bfd_link_hash_undefweak)
2756 {
2757 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2758 eh->dyn_relocs = NULL;
2759
2760 /* Make sure undefined weak symbols are output as dynamic
2761 symbols in PIEs. */
2762 else if (h->dynindx == -1
2763 && ! h->forced_local
2764 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2765 return FALSE;
2766 }
2767 /* For PIE, discard space for pc-relative relocs against
2768 symbols which turn out to need copy relocs. */
2769 else if (info->executable
2770 && (h->needs_copy || eh->needs_copy)
2771 && h->def_dynamic
2772 && !h->def_regular)
2773 {
2774 struct elf_dyn_relocs **pp;
2775
2776 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2777 {
2778 if (p->pc_count != 0)
2779 *pp = p->next;
2780 else
2781 pp = &p->next;
2782 }
2783 }
2784 }
2785 }
2786 else if (ELIMINATE_COPY_RELOCS)
2787 {
2788 /* For the non-shared case, discard space for relocs against
2789 symbols which turn out to need copy relocs or are not
2790 dynamic. */
2791
2792 if (!h->non_got_ref
2793 && ((h->def_dynamic
2794 && !h->def_regular)
2795 || (htab->elf.dynamic_sections_created
2796 && (h->root.type == bfd_link_hash_undefweak
2797 || h->root.type == bfd_link_hash_undefined))))
2798 {
2799 /* Make sure this symbol is output as a dynamic symbol.
2800 Undefined weak syms won't yet be marked as dynamic. */
2801 if (h->dynindx == -1
2802 && ! h->forced_local
2803 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2804 return FALSE;
2805
2806 /* If that succeeded, we know we'll be keeping all the
2807 relocs. */
2808 if (h->dynindx != -1)
2809 goto keep;
2810 }
2811
2812 eh->dyn_relocs = NULL;
2813
2814 keep: ;
2815 }
2816
2817 /* Finally, allocate space. */
2818 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2819 {
2820 asection * sreloc;
2821
2822 sreloc = elf_section_data (p->sec)->sreloc;
2823
2824 BFD_ASSERT (sreloc != NULL);
2825
2826 sreloc->size += p->count * bed->s->sizeof_rela;
2827 }
2828
2829 return TRUE;
2830 }
2831
2832 /* Allocate space in .plt, .got and associated reloc sections for
2833 local dynamic relocs. */
2834
2835 static bfd_boolean
2836 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2837 {
2838 struct elf_link_hash_entry *h
2839 = (struct elf_link_hash_entry *) *slot;
2840
2841 if (h->type != STT_GNU_IFUNC
2842 || !h->def_regular
2843 || !h->ref_regular
2844 || !h->forced_local
2845 || h->root.type != bfd_link_hash_defined)
2846 abort ();
2847
2848 return elf_x86_64_allocate_dynrelocs (h, inf);
2849 }
2850
2851 /* Find any dynamic relocs that apply to read-only sections. */
2852
2853 static bfd_boolean
2854 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2855 void * inf)
2856 {
2857 struct elf_x86_64_link_hash_entry *eh;
2858 struct elf_dyn_relocs *p;
2859
2860 /* Skip local IFUNC symbols. */
2861 if (h->forced_local && h->type == STT_GNU_IFUNC)
2862 return TRUE;
2863
2864 eh = (struct elf_x86_64_link_hash_entry *) h;
2865 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2866 {
2867 asection *s = p->sec->output_section;
2868
2869 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2870 {
2871 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2872
2873 info->flags |= DF_TEXTREL;
2874
2875 if ((info->warn_shared_textrel && info->shared)
2876 || info->error_textrel)
2877 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2878 p->sec->owner, h->root.root.string,
2879 p->sec);
2880
2881 /* Not an error, just cut short the traversal. */
2882 return FALSE;
2883 }
2884 }
2885 return TRUE;
2886 }
2887
2888 /* Convert
2889 mov foo@GOTPCREL(%rip), %reg
2890 to
2891 lea foo(%rip), %reg
2892 with the local symbol, foo. */
2893
2894 static bfd_boolean
2895 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2896 struct bfd_link_info *link_info)
2897 {
2898 Elf_Internal_Shdr *symtab_hdr;
2899 Elf_Internal_Rela *internal_relocs;
2900 Elf_Internal_Rela *irel, *irelend;
2901 bfd_byte *contents;
2902 struct elf_x86_64_link_hash_table *htab;
2903 bfd_boolean changed_contents;
2904 bfd_boolean changed_relocs;
2905 bfd_signed_vma *local_got_refcounts;
2906 bfd_vma maxpagesize;
2907
2908 /* Don't even try to convert non-ELF outputs. */
2909 if (!is_elf_hash_table (link_info->hash))
2910 return FALSE;
2911
2912 /* Nothing to do if there is no need or no output. */
2913 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2914 || sec->need_convert_mov_to_lea == 0
2915 || bfd_is_abs_section (sec->output_section))
2916 return TRUE;
2917
2918 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2919
2920 /* Load the relocations for this section. */
2921 internal_relocs = (_bfd_elf_link_read_relocs
2922 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2923 link_info->keep_memory));
2924 if (internal_relocs == NULL)
2925 return FALSE;
2926
2927 htab = elf_x86_64_hash_table (link_info);
2928 changed_contents = FALSE;
2929 changed_relocs = FALSE;
2930 local_got_refcounts = elf_local_got_refcounts (abfd);
2931 maxpagesize = get_elf_backend_data (abfd)->maxpagesize;
2932
2933 /* Get the section contents. */
2934 if (elf_section_data (sec)->this_hdr.contents != NULL)
2935 contents = elf_section_data (sec)->this_hdr.contents;
2936 else
2937 {
2938 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2939 goto error_return;
2940 }
2941
2942 irelend = internal_relocs + sec->reloc_count;
2943 for (irel = internal_relocs; irel < irelend; irel++)
2944 {
2945 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2946 unsigned int r_symndx = htab->r_sym (irel->r_info);
2947 unsigned int indx;
2948 struct elf_link_hash_entry *h;
2949 asection *tsec;
2950 char symtype;
2951 bfd_vma toff, roff;
2952 enum {
2953 none, local, global
2954 } convert_mov_to_lea;
2955
2956 if (r_type != R_X86_64_GOTPCREL)
2957 continue;
2958
2959 roff = irel->r_offset;
2960
2961 /* Don't convert the R_X86_64_GOTPCREL relocation if it isn't for
2962 a mov instruction (opcode 0x8b, checked just below). */
2963 if (roff < 2
2964 || bfd_get_8 (abfd, contents + roff - 2) != 0x8b)
2965 continue;
2966
2967 tsec = NULL;
2968 convert_mov_to_lea = none;
2969
2970 /* Get the symbol referred to by the reloc. */
2971 if (r_symndx < symtab_hdr->sh_info)
2972 {
2973 Elf_Internal_Sym *isym;
2974
2975 /* Silence older GCC warning. */
2976 h = NULL;
2977
2978 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2979 abfd, r_symndx);
2980
2981 symtype = ELF_ST_TYPE (isym->st_info);
2982
2983 /* An STT_GNU_IFUNC symbol must keep its R_X86_64_GOTPCREL
2984 relocation; also skip relocations against undefined symbols. */
2985 if (symtype != STT_GNU_IFUNC && isym->st_shndx != SHN_UNDEF)
2986 {
2987 if (isym->st_shndx == SHN_ABS)
2988 tsec = bfd_abs_section_ptr;
2989 else if (isym->st_shndx == SHN_COMMON)
2990 tsec = bfd_com_section_ptr;
2991 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
2992 tsec = &_bfd_elf_large_com_section;
2993 else
2994 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
2995
2996 toff = isym->st_value;
2997 convert_mov_to_lea = local;
2998 }
2999 }
3000 else
3001 {
3002 indx = r_symndx - symtab_hdr->sh_info;
3003 h = elf_sym_hashes (abfd)[indx];
3004 BFD_ASSERT (h != NULL);
3005
3006 while (h->root.type == bfd_link_hash_indirect
3007 || h->root.type == bfd_link_hash_warning)
3008 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3009
3010 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
3011 avoid optimizing _DYNAMIC since ld.so may use its link-time
3012 address. */
3013 if (h->def_regular
3014 && h->type != STT_GNU_IFUNC
3015 && h != htab->elf.hdynamic
3016 && SYMBOL_REFERENCES_LOCAL (link_info, h))
3017 {
3018 tsec = h->root.u.def.section;
3019 toff = h->root.u.def.value;
3020 symtype = h->type;
3021 convert_mov_to_lea = global;
3022 }
3023 }
3024
3025 if (convert_mov_to_lea == none)
3026 continue;
3027
3028 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
3029 {
3030 /* At this stage in linking, no SEC_MERGE symbol has been
3031 adjusted, so all references to such symbols need to be
3032 passed through _bfd_merged_section_offset. (Later, in
3033 relocate_section, all SEC_MERGE symbols *except* for
3034 section symbols have been adjusted.)
3035
3036 gas may reduce relocations against symbols in SEC_MERGE
3037 sections to a relocation against the section symbol when
3038 the original addend was zero. When the reloc is against
3039 a section symbol we should include the addend in the
3040 offset passed to _bfd_merged_section_offset, since the
3041 location of interest is the original symbol. On the
3042 other hand, an access to "sym+addend" where "sym" is not
3043 a section symbol should not include the addend; such an
3044 access is presumed to be an offset from "sym", and the
3045 location of interest is just "sym". */
3046 if (symtype == STT_SECTION)
3047 toff += irel->r_addend;
3048
3049 toff = _bfd_merged_section_offset (abfd, &tsec,
3050 elf_section_data (tsec)->sec_info,
3051 toff);
3052
3053 if (symtype != STT_SECTION)
3054 toff += irel->r_addend;
3055 }
3056 else
3057 toff += irel->r_addend;
3058
3059 /* Don't convert if R_X86_64_PC32 relocation overflows. */
3060 if (tsec->output_section == sec->output_section)
3061 {
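/* The 0x80000000 bias turns the signed 32-bit range check into a
single unsigned comparison. */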
3062 if ((toff - roff + 0x80000000) > 0xffffffff)
3063 continue;
3064 }
3065 else
3066 {
3067 asection *asect;
3068 bfd_size_type size;
3069
3070 /* At this point, we don't know the load addresses of the TSEC
3071 and SEC sections, so we estimate the distance between
3072 SEC and TSEC. */
3073 size = 0;
3074 for (asect = sec->output_section;
3075 asect != NULL && asect != tsec->output_section;
3076 asect = asect->next)
3077 {
3078 asection *i;
3079 for (i = asect->output_section->map_head.s;
3080 i != NULL;
3081 i = i->map_head.s)
3082 {
3083 size = align_power (size, i->alignment_power);
3084 size += i->size;
3085 }
3086 }
3087
3088 /* Don't convert R_X86_64_GOTPCREL if TSEC isn't placed after
3089 SEC. */
3090 if (asect == NULL)
3091 continue;
3092
3093 /* Take PT_GNU_RELRO segment into account by adding
3094 maxpagesize. */
3095 if ((toff + size + maxpagesize - roff + 0x80000000)
3096 > 0xffffffff)
3097 continue;
3098 }
3099
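/* Rewrite the mov opcode (0x8b) to lea (0x8d) and turn the reloc into
a plain PC-relative reference to the symbol. */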
3100 bfd_put_8 (abfd, 0x8d, contents + roff - 2);
3101 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
3102 changed_contents = TRUE;
3103 changed_relocs = TRUE;
3104
3105 if (convert_mov_to_lea == local)
3106 {
3107 if (local_got_refcounts != NULL
3108 && local_got_refcounts[r_symndx] > 0)
3109 local_got_refcounts[r_symndx] -= 1;
3110 }
3111 else
3112 {
3113 if (h->got.refcount > 0)
3114 h->got.refcount -= 1;
3115 }
3116 }
3117
3118 if (contents != NULL
3119 && elf_section_data (sec)->this_hdr.contents != contents)
3120 {
3121 if (!changed_contents && !link_info->keep_memory)
3122 free (contents);
3123 else
3124 {
3125 /* Cache the section contents for elf_link_input_bfd. */
3126 elf_section_data (sec)->this_hdr.contents = contents;
3127 }
3128 }
3129
3130 if (elf_section_data (sec)->relocs != internal_relocs)
3131 {
3132 if (!changed_relocs)
3133 free (internal_relocs);
3134 else
3135 elf_section_data (sec)->relocs = internal_relocs;
3136 }
3137
3138 return TRUE;
3139
3140 error_return:
3141 if (contents != NULL
3142 && elf_section_data (sec)->this_hdr.contents != contents)
3143 free (contents);
3144 if (internal_relocs != NULL
3145 && elf_section_data (sec)->relocs != internal_relocs)
3146 free (internal_relocs);
3147 return FALSE;
3148 }
3149
3150 /* Set the sizes of the dynamic sections. */
3151
3152 static bfd_boolean
3153 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3154 struct bfd_link_info *info)
3155 {
3156 struct elf_x86_64_link_hash_table *htab;
3157 bfd *dynobj;
3158 asection *s;
3159 bfd_boolean relocs;
3160 bfd *ibfd;
3161 const struct elf_backend_data *bed;
3162
3163 htab = elf_x86_64_hash_table (info);
3164 if (htab == NULL)
3165 return FALSE;
3166 bed = get_elf_backend_data (output_bfd);
3167
3168 dynobj = htab->elf.dynobj;
3169 if (dynobj == NULL)
3170 abort ();
3171
3172 if (htab->elf.dynamic_sections_created)
3173 {
3174 /* Set the contents of the .interp section to the interpreter. */
3175 if (info->executable)
3176 {
3177 s = bfd_get_linker_section (dynobj, ".interp");
3178 if (s == NULL)
3179 abort ();
3180 s->size = htab->dynamic_interpreter_size;
3181 s->contents = (unsigned char *) htab->dynamic_interpreter;
3182 }
3183 }
3184
3185 /* Set up .got offsets for local syms, and space for local dynamic
3186 relocs. */
3187 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3188 {
3189 bfd_signed_vma *local_got;
3190 bfd_signed_vma *end_local_got;
3191 char *local_tls_type;
3192 bfd_vma *local_tlsdesc_gotent;
3193 bfd_size_type locsymcount;
3194 Elf_Internal_Shdr *symtab_hdr;
3195 asection *srel;
3196
3197 if (! is_x86_64_elf (ibfd))
3198 continue;
3199
3200 for (s = ibfd->sections; s != NULL; s = s->next)
3201 {
3202 struct elf_dyn_relocs *p;
3203
3204 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3205 return FALSE;
3206
3207 for (p = (struct elf_dyn_relocs *)
3208 (elf_section_data (s)->local_dynrel);
3209 p != NULL;
3210 p = p->next)
3211 {
3212 if (!bfd_is_abs_section (p->sec)
3213 && bfd_is_abs_section (p->sec->output_section))
3214 {
3215 /* Input section has been discarded, either because
3216 it is a copy of a linkonce section or due to
3217 linker script /DISCARD/, so we'll be discarding
3218 the relocs too. */
3219 }
3220 else if (p->count != 0)
3221 {
3222 srel = elf_section_data (p->sec)->sreloc;
3223 srel->size += p->count * bed->s->sizeof_rela;
3224 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3225 && (info->flags & DF_TEXTREL) == 0)
3226 {
3227 info->flags |= DF_TEXTREL;
3228 if ((info->warn_shared_textrel && info->shared)
3229 || info->error_textrel)
3230 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3231 p->sec->owner, p->sec);
3232 }
3233 }
3234 }
3235 }
3236
3237 local_got = elf_local_got_refcounts (ibfd);
3238 if (!local_got)
3239 continue;
3240
3241 symtab_hdr = &elf_symtab_hdr (ibfd);
3242 locsymcount = symtab_hdr->sh_info;
3243 end_local_got = local_got + locsymcount;
3244 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3245 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3246 s = htab->elf.sgot;
3247 srel = htab->elf.srelgot;
3248 for (; local_got < end_local_got;
3249 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3250 {
3251 *local_tlsdesc_gotent = (bfd_vma) -1;
3252 if (*local_got > 0)
3253 {
3254 if (GOT_TLS_GDESC_P (*local_tls_type))
3255 {
3256 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3257 - elf_x86_64_compute_jump_table_size (htab);
3258 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3259 *local_got = (bfd_vma) -2;
3260 }
3261 if (! GOT_TLS_GDESC_P (*local_tls_type)
3262 || GOT_TLS_GD_P (*local_tls_type))
3263 {
3264 *local_got = s->size;
3265 s->size += GOT_ENTRY_SIZE;
3266 if (GOT_TLS_GD_P (*local_tls_type))
3267 s->size += GOT_ENTRY_SIZE;
3268 }
3269 if (info->shared
3270 || GOT_TLS_GD_ANY_P (*local_tls_type)
3271 || *local_tls_type == GOT_TLS_IE)
3272 {
3273 if (GOT_TLS_GDESC_P (*local_tls_type))
3274 {
3275 htab->elf.srelplt->size
3276 += bed->s->sizeof_rela;
3277 htab->tlsdesc_plt = (bfd_vma) -1;
3278 }
3279 if (! GOT_TLS_GDESC_P (*local_tls_type)
3280 || GOT_TLS_GD_P (*local_tls_type))
3281 srel->size += bed->s->sizeof_rela;
3282 }
3283 }
3284 else
3285 *local_got = (bfd_vma) -1;
3286 }
3287 }
3288
3289 if (htab->tls_ld_got.refcount > 0)
3290 {
3291 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3292 relocs. */
3293 htab->tls_ld_got.offset = htab->elf.sgot->size;
3294 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3295 htab->elf.srelgot->size += bed->s->sizeof_rela;
3296 }
3297 else
3298 htab->tls_ld_got.offset = -1;
3299
3300 /* Allocate global sym .plt and .got entries, and space for global
3301 sym dynamic relocs. */
3302 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3303 info);
3304
3305 /* Allocate .plt and .got entries, and space for local symbols. */
3306 htab_traverse (htab->loc_hash_table,
3307 elf_x86_64_allocate_local_dynrelocs,
3308 info);
3309
3310 /* For every jump slot reserved in the sgotplt, reloc_count is
3311 incremented. However, when we reserve space for TLS descriptors,
3312 it's not incremented, so in order to compute the space reserved
3313 for them, it suffices to multiply the reloc count by the jump
3314 slot size.
3315
3316 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3317 so that R_X86_64_IRELATIVE entries come last. */
3318 if (htab->elf.srelplt)
3319 {
3320 htab->sgotplt_jump_table_size
3321 = elf_x86_64_compute_jump_table_size (htab);
3322 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3323 }
3324 else if (htab->elf.irelplt)
3325 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3326
3327 if (htab->tlsdesc_plt)
3328 {
3329 /* If we're not using lazy TLS relocations, don't generate the
3330 PLT and GOT entries they require. */
3331 if ((info->flags & DF_BIND_NOW))
3332 htab->tlsdesc_plt = 0;
3333 else
3334 {
3335 htab->tlsdesc_got = htab->elf.sgot->size;
3336 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3337 /* Reserve room for the initial entry.
3338 FIXME: we could probably do away with it in this case. */
3339 if (htab->elf.splt->size == 0)
3340 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3341 htab->tlsdesc_plt = htab->elf.splt->size;
3342 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3343 }
3344 }
3345
3346 if (htab->elf.sgotplt)
3347 {
3348 /* Don't allocate the .got.plt section if there are no GOT or PLT
3349 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3350 if ((htab->elf.hgot == NULL
3351 || !htab->elf.hgot->ref_regular_nonweak)
3352 && (htab->elf.sgotplt->size
3353 == get_elf_backend_data (output_bfd)->got_header_size)
3354 && (htab->elf.splt == NULL
3355 || htab->elf.splt->size == 0)
3356 && (htab->elf.sgot == NULL
3357 || htab->elf.sgot->size == 0)
3358 && (htab->elf.iplt == NULL
3359 || htab->elf.iplt->size == 0)
3360 && (htab->elf.igotplt == NULL
3361 || htab->elf.igotplt->size == 0))
3362 htab->elf.sgotplt->size = 0;
3363 }
3364
3365 if (htab->plt_eh_frame != NULL
3366 && htab->elf.splt != NULL
3367 && htab->elf.splt->size != 0
3368 && !bfd_is_abs_section (htab->elf.splt->output_section)
3369 && _bfd_elf_eh_frame_present (info))
3370 {
3371 const struct elf_x86_64_backend_data *arch_data
3372 = get_elf_x86_64_arch_data (bed);
3373 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3374 }
3375
3376 /* We now have determined the sizes of the various dynamic sections.
3377 Allocate memory for them. */
3378 relocs = FALSE;
3379 for (s = dynobj->sections; s != NULL; s = s->next)
3380 {
3381 if ((s->flags & SEC_LINKER_CREATED) == 0)
3382 continue;
3383
3384 if (s == htab->elf.splt
3385 || s == htab->elf.sgot
3386 || s == htab->elf.sgotplt
3387 || s == htab->elf.iplt
3388 || s == htab->elf.igotplt
3389 || s == htab->plt_bnd
3390 || s == htab->plt_got
3391 || s == htab->plt_eh_frame
3392 || s == htab->sdynbss)
3393 {
3394 /* Strip this section if we don't need it; see the
3395 comment below. */
3396 }
3397 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3398 {
3399 if (s->size != 0 && s != htab->elf.srelplt)
3400 relocs = TRUE;
3401
3402 /* We use the reloc_count field as a counter if we need
3403 to copy relocs into the output file. */
3404 if (s != htab->elf.srelplt)
3405 s->reloc_count = 0;
3406 }
3407 else
3408 {
3409 /* It's not one of our sections, so don't allocate space. */
3410 continue;
3411 }
3412
3413 if (s->size == 0)
3414 {
3415 /* If we don't need this section, strip it from the
3416 output file. This is mostly to handle .rela.bss and
3417 .rela.plt. We must create both sections in
3418 create_dynamic_sections, because they must be created
3419 before the linker maps input sections to output
3420 sections. The linker does that before
3421 adjust_dynamic_symbol is called, and it is that
3422 function which decides whether anything needs to go
3423 into these sections. */
3424
3425 s->flags |= SEC_EXCLUDE;
3426 continue;
3427 }
3428
3429 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3430 continue;
3431
3432 /* Allocate memory for the section contents. We use bfd_zalloc
3433 here in case unused entries are not reclaimed before the
3434 section's contents are written out. This should not happen,
3435 but this way if it does, we get a R_X86_64_NONE reloc instead
3436 of garbage. */
3437 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3438 if (s->contents == NULL)
3439 return FALSE;
3440 }
3441
3442 if (htab->plt_eh_frame != NULL
3443 && htab->plt_eh_frame->contents != NULL)
3444 {
3445 const struct elf_x86_64_backend_data *arch_data
3446 = get_elf_x86_64_arch_data (bed);
3447
3448 memcpy (htab->plt_eh_frame->contents,
3449 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3450 bfd_put_32 (dynobj, htab->elf.splt->size,
3451 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3452 }
3453
3454 if (htab->elf.dynamic_sections_created)
3455 {
3456 /* Add some entries to the .dynamic section. We fill in the
3457 values later, in elf_x86_64_finish_dynamic_sections, but we
3458 must add the entries now so that we get the correct size for
3459 the .dynamic section. The DT_DEBUG entry is filled in by the
3460 dynamic linker and used by the debugger. */
3461 #define add_dynamic_entry(TAG, VAL) \
3462 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3463
3464 if (info->executable)
3465 {
3466 if (!add_dynamic_entry (DT_DEBUG, 0))
3467 return FALSE;
3468 }
3469
3470 if (htab->elf.splt->size != 0)
3471 {
3472 /* DT_PLTGOT is used by prelink even if there is no PLT
3473 relocation. */
3474 if (!add_dynamic_entry (DT_PLTGOT, 0))
3475 return FALSE;
3476
3477 if (htab->elf.srelplt->size != 0)
3478 {
3479 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3480 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3481 || !add_dynamic_entry (DT_JMPREL, 0))
3482 return FALSE;
3483 }
3484
3485 if (htab->tlsdesc_plt
3486 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3487 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3488 return FALSE;
3489 }
3490
3491 if (relocs)
3492 {
3493 if (!add_dynamic_entry (DT_RELA, 0)
3494 || !add_dynamic_entry (DT_RELASZ, 0)
3495 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3496 return FALSE;
3497
3498 /* If any dynamic relocs apply to a read-only section,
3499 then we need a DT_TEXTREL entry. */
3500 if ((info->flags & DF_TEXTREL) == 0)
3501 elf_link_hash_traverse (&htab->elf,
3502 elf_x86_64_readonly_dynrelocs,
3503 info);
3504
3505 if ((info->flags & DF_TEXTREL) != 0)
3506 {
3507 if (!add_dynamic_entry (DT_TEXTREL, 0))
3508 return FALSE;
3509 }
3510 }
3511 }
3512 #undef add_dynamic_entry
3513
3514 return TRUE;
3515 }
3516
3517 static bfd_boolean
3518 elf_x86_64_always_size_sections (bfd *output_bfd,
3519 struct bfd_link_info *info)
3520 {
3521 asection *tls_sec = elf_hash_table (info)->tls_sec;
3522
3523 if (tls_sec)
3524 {
3525 struct elf_link_hash_entry *tlsbase;
3526
3527 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3528 "_TLS_MODULE_BASE_",
3529 FALSE, FALSE, FALSE);
3530
3531 if (tlsbase && tlsbase->type == STT_TLS)
3532 {
3533 struct elf_x86_64_link_hash_table *htab;
3534 struct bfd_link_hash_entry *bh = NULL;
3535 const struct elf_backend_data *bed
3536 = get_elf_backend_data (output_bfd);
3537
3538 htab = elf_x86_64_hash_table (info);
3539 if (htab == NULL)
3540 return FALSE;
3541
3542 if (!(_bfd_generic_link_add_one_symbol
3543 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3544 tls_sec, 0, NULL, FALSE,
3545 bed->collect, &bh)))
3546 return FALSE;
3547
3548 htab->tls_module_base = bh;
3549
3550 tlsbase = (struct elf_link_hash_entry *)bh;
3551 tlsbase->def_regular = 1;
3552 tlsbase->other = STV_HIDDEN;
3553 tlsbase->root.linker_def = 1;
3554 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3555 }
3556 }
3557
3558 return TRUE;
3559 }
3560
3561 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3562 executables. Rather than setting it to the beginning of the TLS
3563 section, we have to set it to the end. This function may be called
3564 multiple times; it is idempotent. */
3565
3566 static void
3567 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3568 {
3569 struct elf_x86_64_link_hash_table *htab;
3570 struct bfd_link_hash_entry *base;
3571
3572 if (!info->executable)
3573 return;
3574
3575 htab = elf_x86_64_hash_table (info);
3576 if (htab == NULL)
3577 return;
3578
3579 base = htab->tls_module_base;
3580 if (base == NULL)
3581 return;
3582
3583 base->u.def.value = htab->elf.tls_size;
3584 }
3585
3586 /* Return the base VMA address which should be subtracted from real addresses
3587 when resolving @dtpoff relocation.
3588 This is PT_TLS segment p_vaddr. */
3589
3590 static bfd_vma
3591 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3592 {
3593 /* If tls_sec is NULL, we should have signalled an error already. */
3594 if (elf_hash_table (info)->tls_sec == NULL)
3595 return 0;
3596 return elf_hash_table (info)->tls_sec->vma;
3597 }
3598
3599 /* Return the relocation value for @tpoff relocation
3600 if STT_TLS virtual address is ADDRESS. */
3601
3602 static bfd_vma
3603 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3604 {
3605 struct elf_link_hash_table *htab = elf_hash_table (info);
3606 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3607 bfd_vma static_tls_size;
3608
3609 /* If tls_segment is NULL, we should have signalled an error already. */
3610 if (htab->tls_sec == NULL)
3611 return 0;
3612
3613 /* Consider special static TLS alignment requirements. */
3614 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3615 return address - static_tls_size - htab->tls_sec->vma;
3616 }
3617
3618 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3619 branch? */
3620
3621 static bfd_boolean
3622 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3623 {
3624 /* Opcode Instruction
3625 0xe8 call
3626 0xe9 jump
3627 0x0f 0x8x conditional jump */
3628 return ((offset > 0
3629 && (contents [offset - 1] == 0xe8
3630 || contents [offset - 1] == 0xe9))
3631 || (offset > 1
3632 && contents [offset - 2] == 0x0f
3633 && (contents [offset - 1] & 0xf0) == 0x80));
3634 }
3635
3636 /* Relocate an x86_64 ELF section. */
3637
3638 static bfd_boolean
3639 elf_x86_64_relocate_section (bfd *output_bfd,
3640 struct bfd_link_info *info,
3641 bfd *input_bfd,
3642 asection *input_section,
3643 bfd_byte *contents,
3644 Elf_Internal_Rela *relocs,
3645 Elf_Internal_Sym *local_syms,
3646 asection **local_sections)
3647 {
3648 struct elf_x86_64_link_hash_table *htab;
3649 Elf_Internal_Shdr *symtab_hdr;
3650 struct elf_link_hash_entry **sym_hashes;
3651 bfd_vma *local_got_offsets;
3652 bfd_vma *local_tlsdesc_gotents;
3653 Elf_Internal_Rela *rel;
3654 Elf_Internal_Rela *relend;
3655 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3656
3657 BFD_ASSERT (is_x86_64_elf (input_bfd));
3658
3659 htab = elf_x86_64_hash_table (info);
3660 if (htab == NULL)
3661 return FALSE;
3662 symtab_hdr = &elf_symtab_hdr (input_bfd);
3663 sym_hashes = elf_sym_hashes (input_bfd);
3664 local_got_offsets = elf_local_got_offsets (input_bfd);
3665 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3666
3667 elf_x86_64_set_tls_module_base (info);
3668
3669 rel = relocs;
3670 relend = relocs + input_section->reloc_count;
3671 for (; rel < relend; rel++)
3672 {
3673 unsigned int r_type;
3674 reloc_howto_type *howto;
3675 unsigned long r_symndx;
3676 struct elf_link_hash_entry *h;
3677 struct elf_x86_64_link_hash_entry *eh;
3678 Elf_Internal_Sym *sym;
3679 asection *sec;
3680 bfd_vma off, offplt, plt_offset;
3681 bfd_vma relocation;
3682 bfd_boolean unresolved_reloc;
3683 bfd_reloc_status_type r;
3684 int tls_type;
3685 asection *base_got, *resolved_plt;
3686 bfd_vma st_size;
3687
3688 r_type = ELF32_R_TYPE (rel->r_info);
3689 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3690 || r_type == (int) R_X86_64_GNU_VTENTRY)
3691 continue;
3692
3693 if (r_type >= (int) R_X86_64_standard)
3694 {
3695 (*_bfd_error_handler)
3696 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3697 input_bfd, input_section, r_type);
3698 bfd_set_error (bfd_error_bad_value);
3699 return FALSE;
3700 }
3701
3702 if (r_type != (int) R_X86_64_32
3703 || ABI_64_P (output_bfd))
3704 howto = x86_64_elf_howto_table + r_type;
3705 else
3706 howto = (x86_64_elf_howto_table
3707 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3708 r_symndx = htab->r_sym (rel->r_info);
3709 h = NULL;
3710 sym = NULL;
3711 sec = NULL;
3712 unresolved_reloc = FALSE;
3713 if (r_symndx < symtab_hdr->sh_info)
3714 {
3715 sym = local_syms + r_symndx;
3716 sec = local_sections[r_symndx];
3717
3718 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3719 &sec, rel);
3720 st_size = sym->st_size;
3721
3722 /* Relocate against local STT_GNU_IFUNC symbol. */
3723 if (!info->relocatable
3724 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3725 {
3726 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3727 rel, FALSE);
3728 if (h == NULL)
3729 abort ();
3730
3731 /* Set STT_GNU_IFUNC symbol value. */
3732 h->root.u.def.value = sym->st_value;
3733 h->root.u.def.section = sec;
3734 }
3735 }
3736 else
3737 {
3738 bfd_boolean warned ATTRIBUTE_UNUSED;
3739 bfd_boolean ignored ATTRIBUTE_UNUSED;
3740
3741 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3742 r_symndx, symtab_hdr, sym_hashes,
3743 h, sec, relocation,
3744 unresolved_reloc, warned, ignored);
3745 st_size = h->size;
3746 }
3747
3748 if (sec != NULL && discarded_section (sec))
3749 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3750 rel, 1, relend, howto, 0, contents);
3751
3752 if (info->relocatable)
3753 continue;
3754
3755 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3756 {
3757 if (r_type == R_X86_64_64)
3758 {
3759 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3760 zero-extend it to 64bit if addend is zero. */
3761 r_type = R_X86_64_32;
3762 memset (contents + rel->r_offset + 4, 0, 4);
3763 }
3764 else if (r_type == R_X86_64_SIZE64)
3765 {
3766 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3767 zero-extend it to 64bit if addend is zero. */
3768 r_type = R_X86_64_SIZE32;
3769 memset (contents + rel->r_offset + 4, 0, 4);
3770 }
3771 }
3772
3773 eh = (struct elf_x86_64_link_hash_entry *) h;
3774
3775 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3776 it here if it is defined in a non-shared object. */
3777 if (h != NULL
3778 && h->type == STT_GNU_IFUNC
3779 && h->def_regular)
3780 {
3781 bfd_vma plt_index;
3782 const char *name;
3783
3784 if ((input_section->flags & SEC_ALLOC) == 0
3785 || h->plt.offset == (bfd_vma) -1)
3786 abort ();
3787
3788 /* STT_GNU_IFUNC symbol must go through PLT. */
3789 if (htab->elf.splt != NULL)
3790 {
3791 if (htab->plt_bnd != NULL)
3792 {
3793 resolved_plt = htab->plt_bnd;
3794 plt_offset = eh->plt_bnd.offset;
3795 }
3796 else
3797 {
3798 resolved_plt = htab->elf.splt;
3799 plt_offset = h->plt.offset;
3800 }
3801 }
3802 else
3803 {
3804 resolved_plt = htab->elf.iplt;
3805 plt_offset = h->plt.offset;
3806 }
3807
3808 relocation = (resolved_plt->output_section->vma
3809 + resolved_plt->output_offset + plt_offset);
3810
3811 switch (r_type)
3812 {
3813 default:
3814 if (h->root.root.string)
3815 name = h->root.root.string;
3816 else
3817 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3818 NULL);
3819 (*_bfd_error_handler)
3820 (_("%B: relocation %s against STT_GNU_IFUNC "
3821 "symbol `%s' isn't handled by %s"), input_bfd,
3822 x86_64_elf_howto_table[r_type].name,
3823 name, __FUNCTION__);
3824 bfd_set_error (bfd_error_bad_value);
3825 return FALSE;
3826
3827 case R_X86_64_32S:
3828 if (info->shared)
3829 abort ();
3830 goto do_relocation;
3831
3832 case R_X86_64_32:
3833 if (ABI_64_P (output_bfd))
3834 goto do_relocation;
3835 /* FALLTHROUGH */
3836 case R_X86_64_64:
3837 if (rel->r_addend != 0)
3838 {
3839 if (h->root.root.string)
3840 name = h->root.root.string;
3841 else
3842 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3843 sym, NULL);
3844 (*_bfd_error_handler)
3845 (_("%B: relocation %s against STT_GNU_IFUNC "
3846 "symbol `%s' has non-zero addend: %d"),
3847 input_bfd, x86_64_elf_howto_table[r_type].name,
3848 name, rel->r_addend);
3849 bfd_set_error (bfd_error_bad_value);
3850 return FALSE;
3851 }
3852
3853 /* Generate dynamic relocation only when there is a
3854 non-GOT reference in a shared object. */
3855 if (info->shared && h->non_got_ref)
3856 {
3857 Elf_Internal_Rela outrel;
3858 asection *sreloc;
3859
3860 /* Need a dynamic relocation to get the real function
3861 address. */
3862 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3863 info,
3864 input_section,
3865 rel->r_offset);
3866 if (outrel.r_offset == (bfd_vma) -1
3867 || outrel.r_offset == (bfd_vma) -2)
3868 abort ();
3869
3870 outrel.r_offset += (input_section->output_section->vma
3871 + input_section->output_offset);
3872
3873 if (h->dynindx == -1
3874 || h->forced_local
3875 || info->executable)
3876 {
3877 /* This symbol is resolved locally. */
3878 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3879 outrel.r_addend = (h->root.u.def.value
3880 + h->root.u.def.section->output_section->vma
3881 + h->root.u.def.section->output_offset);
3882 }
3883 else
3884 {
3885 outrel.r_info = htab->r_info (h->dynindx, r_type);
3886 outrel.r_addend = 0;
3887 }
3888
3889 sreloc = htab->elf.irelifunc;
3890 elf_append_rela (output_bfd, sreloc, &outrel);
3891
3892 /* If this reloc is against an external symbol, we
3893 do not want to fiddle with the addend. Otherwise,
3894 we need to include the symbol value so that it
3895 becomes an addend for the dynamic reloc. For an
3896 internal symbol, we have updated addend. */
3897 continue;
3898 }
3899 /* FALLTHROUGH */
3900 case R_X86_64_PC32:
3901 case R_X86_64_PC32_BND:
3902 case R_X86_64_PC64:
3903 case R_X86_64_PLT32:
3904 case R_X86_64_PLT32_BND:
3905 goto do_relocation;
3906
3907 case R_X86_64_GOTPCREL:
3908 case R_X86_64_GOTPCREL64:
3909 base_got = htab->elf.sgot;
3910 off = h->got.offset;
3911
3912 if (base_got == NULL)
3913 abort ();
3914
3915 if (off == (bfd_vma) -1)
3916 {
3917 /* We can't use h->got.offset here to save state, or
3918 even just remember the offset, as finish_dynamic_symbol
3919 would use that as offset into .got. */
3920
3921 if (htab->elf.splt != NULL)
3922 {
3923 plt_index = h->plt.offset / plt_entry_size - 1;
3924 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3925 base_got = htab->elf.sgotplt;
3926 }
3927 else
3928 {
3929 plt_index = h->plt.offset / plt_entry_size;
3930 off = plt_index * GOT_ENTRY_SIZE;
3931 base_got = htab->elf.igotplt;
3932 }
3933
3934 if (h->dynindx == -1
3935 || h->forced_local
3936 || info->symbolic)
3937 {
3938 /* This references the local definition. We must
3939 initialize this entry in the global offset table.
3940 Since the offset must always be a multiple of 8,
3941 we use the least significant bit to record
3942 whether we have initialized it already.
3943
3944 When doing a dynamic link, we create a .rela.got
3945 relocation entry to initialize the value. This
3946 is done in the finish_dynamic_symbol routine. */
3947 if ((off & 1) != 0)
3948 off &= ~1;
3949 else
3950 {
3951 bfd_put_64 (output_bfd, relocation,
3952 base_got->contents + off);
3953 /* Note that this is harmless for the GOTPLT64
3954 case, as -1 | 1 still is -1. */
3955 h->got.offset |= 1;
3956 }
3957 }
3958 }
3959
3960 relocation = (base_got->output_section->vma
3961 + base_got->output_offset + off);
3962
3963 goto do_relocation;
3964 }
3965 }
3966
3967 /* When generating a shared object, the relocations handled here are
3968 copied into the output file to be resolved at run time. */
3969 switch (r_type)
3970 {
3971 case R_X86_64_GOT32:
3972 case R_X86_64_GOT64:
3973 /* Relocation is to the entry for this symbol in the global
3974 offset table. */
3975 case R_X86_64_GOTPCREL:
3976 case R_X86_64_GOTPCREL64:
3977 /* Use global offset table entry as symbol value. */
3978 case R_X86_64_GOTPLT64:
3979 /* This is obsolete and treated the same as GOT64. */
3980 base_got = htab->elf.sgot;
3981
3982 if (htab->elf.sgot == NULL)
3983 abort ();
3984
3985 if (h != NULL)
3986 {
3987 bfd_boolean dyn;
3988
3989 off = h->got.offset;
3990 if (h->needs_plt
3991 && h->plt.offset != (bfd_vma)-1
3992 && off == (bfd_vma)-1)
3993 {
3994 /* We can't use h->got.offset here to save
3995 state, or even just remember the offset, as
3996 finish_dynamic_symbol would use that as offset into
3997 .got. */
3998 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3999 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4000 base_got = htab->elf.sgotplt;
4001 }
4002
4003 dyn = htab->elf.dynamic_sections_created;
4004
4005 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4006 || (info->shared
4007 && SYMBOL_REFERENCES_LOCAL (info, h))
4008 || (ELF_ST_VISIBILITY (h->other)
4009 && h->root.type == bfd_link_hash_undefweak))
4010 {
4011 /* This is actually a static link, or it is a -Bsymbolic
4012 link and the symbol is defined locally, or the symbol
4013 was forced to be local because of a version file. We
4014 must initialize this entry in the global offset table.
4015 Since the offset must always be a multiple of 8, we
4016 use the least significant bit to record whether we
4017 have initialized it already.
4018
4019 When doing a dynamic link, we create a .rela.got
4020 relocation entry to initialize the value. This is
4021 done in the finish_dynamic_symbol routine. */
4022 if ((off & 1) != 0)
4023 off &= ~1;
4024 else
4025 {
4026 bfd_put_64 (output_bfd, relocation,
4027 base_got->contents + off);
4028 /* Note that this is harmless for the GOTPLT64 case,
4029 as -1 | 1 still is -1. */
4030 h->got.offset |= 1;
4031 }
4032 }
4033 else
4034 unresolved_reloc = FALSE;
4035 }
4036 else
4037 {
4038 if (local_got_offsets == NULL)
4039 abort ();
4040
4041 off = local_got_offsets[r_symndx];
4042
4043 /* The offset must always be a multiple of 8. We use
4044 the least significant bit to record whether we have
4045 already generated the necessary reloc. */
4046 if ((off & 1) != 0)
4047 off &= ~1;
4048 else
4049 {
4050 bfd_put_64 (output_bfd, relocation,
4051 base_got->contents + off);
4052
4053 if (info->shared)
4054 {
4055 asection *s;
4056 Elf_Internal_Rela outrel;
4057
4058 /* We need to generate a R_X86_64_RELATIVE reloc
4059 for the dynamic linker. */
4060 s = htab->elf.srelgot;
4061 if (s == NULL)
4062 abort ();
4063
4064 outrel.r_offset = (base_got->output_section->vma
4065 + base_got->output_offset
4066 + off);
4067 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4068 outrel.r_addend = relocation;
4069 elf_append_rela (output_bfd, s, &outrel);
4070 }
4071
4072 local_got_offsets[r_symndx] |= 1;
4073 }
4074 }
4075
4076 if (off >= (bfd_vma) -2)
4077 abort ();
4078
4079 relocation = base_got->output_section->vma
4080 + base_got->output_offset + off;
4081 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
4082 relocation -= htab->elf.sgotplt->output_section->vma
4083 - htab->elf.sgotplt->output_offset;
4084
4085 break;
4086
4087 case R_X86_64_GOTOFF64:
4088 /* Relocation is relative to the start of the global offset
4089 table. */
4090
4091 /* Check to make sure it isn't a protected function or data
4092 symbol for a shared library, since it may not be local when
4093 used as a function address or with a copy relocation. We also
4094 need to make sure that the symbol is referenced locally. */
4095 if (info->shared && h)
4096 {
4097 if (!h->def_regular)
4098 {
4099 const char *v;
4100
4101 switch (ELF_ST_VISIBILITY (h->other))
4102 {
4103 case STV_HIDDEN:
4104 v = _("hidden symbol");
4105 break;
4106 case STV_INTERNAL:
4107 v = _("internal symbol");
4108 break;
4109 case STV_PROTECTED:
4110 v = _("protected symbol");
4111 break;
4112 default:
4113 v = _("symbol");
4114 break;
4115 }
4116
4117 (*_bfd_error_handler)
4118 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4119 input_bfd, v, h->root.root.string);
4120 bfd_set_error (bfd_error_bad_value);
4121 return FALSE;
4122 }
4123 else if (!info->executable
4124 && !SYMBOL_REFERENCES_LOCAL (info, h)
4125 && (h->type == STT_FUNC
4126 || h->type == STT_OBJECT)
4127 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4128 {
4129 (*_bfd_error_handler)
4130 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4131 input_bfd,
4132 h->type == STT_FUNC ? "function" : "data",
4133 h->root.root.string);
4134 bfd_set_error (bfd_error_bad_value);
4135 return FALSE;
4136 }
4137 }
4138
4139 /* Note that sgot is not involved in this
4140 calculation. We always want the start of .got.plt. If we
4141 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4142 permitted by the ABI, we might have to change this
4143 calculation. */
4144 relocation -= htab->elf.sgotplt->output_section->vma
4145 + htab->elf.sgotplt->output_offset;
4146 break;
4147
4148 case R_X86_64_GOTPC32:
4149 case R_X86_64_GOTPC64:
4150 /* Use global offset table as symbol value. */
4151 relocation = htab->elf.sgotplt->output_section->vma
4152 + htab->elf.sgotplt->output_offset;
4153 unresolved_reloc = FALSE;
4154 break;
4155
4156 case R_X86_64_PLTOFF64:
4157 /* Relocation is PLT entry relative to GOT. For local
4158 symbols it's the symbol itself relative to GOT. */
4159 if (h != NULL
4160 /* See PLT32 handling. */
4161 && h->plt.offset != (bfd_vma) -1
4162 && htab->elf.splt != NULL)
4163 {
4164 if (htab->plt_bnd != NULL)
4165 {
4166 resolved_plt = htab->plt_bnd;
4167 plt_offset = eh->plt_bnd.offset;
4168 }
4169 else
4170 {
4171 resolved_plt = htab->elf.splt;
4172 plt_offset = h->plt.offset;
4173 }
4174
4175 relocation = (resolved_plt->output_section->vma
4176 + resolved_plt->output_offset
4177 + plt_offset);
4178 unresolved_reloc = FALSE;
4179 }
4180
4181 relocation -= htab->elf.sgotplt->output_section->vma
4182 + htab->elf.sgotplt->output_offset;
4183 break;
4184
4185 case R_X86_64_PLT32:
4186 case R_X86_64_PLT32_BND:
4187 /* Relocation is to the entry for this symbol in the
4188 procedure linkage table. */
4189
4190 /* Resolve a PLT32 reloc against a local symbol directly,
4191 without using the procedure linkage table. */
4192 if (h == NULL)
4193 break;
4194
4195 if ((h->plt.offset == (bfd_vma) -1
4196 && eh->plt_got.offset == (bfd_vma) -1)
4197 || htab->elf.splt == NULL)
4198 {
4199 /* We didn't make a PLT entry for this symbol. This
4200 happens when statically linking PIC code, or when
4201 using -Bsymbolic. */
4202 break;
4203 }
4204
4205 if (h->plt.offset != (bfd_vma) -1)
4206 {
4207 if (htab->plt_bnd != NULL)
4208 {
4209 resolved_plt = htab->plt_bnd;
4210 plt_offset = eh->plt_bnd.offset;
4211 }
4212 else
4213 {
4214 resolved_plt = htab->elf.splt;
4215 plt_offset = h->plt.offset;
4216 }
4217 }
4218 else
4219 {
4220 /* Use the GOT PLT. */
4221 resolved_plt = htab->plt_got;
4222 plt_offset = eh->plt_got.offset;
4223 }
4224
4225 relocation = (resolved_plt->output_section->vma
4226 + resolved_plt->output_offset
4227 + plt_offset);
4228 unresolved_reloc = FALSE;
4229 break;
4230
4231 case R_X86_64_SIZE32:
4232 case R_X86_64_SIZE64:
4233 /* Set to symbol size. */
4234 relocation = st_size;
4235 goto direct;
4236
4237 case R_X86_64_PC8:
4238 case R_X86_64_PC16:
4239 case R_X86_64_PC32:
4240 case R_X86_64_PC32_BND:
4241 /* Don't complain about -fPIC if the symbol is undefined when
4242 building an executable. */
4243 if (info->shared
4244 && (input_section->flags & SEC_ALLOC) != 0
4245 && (input_section->flags & SEC_READONLY) != 0
4246 && h != NULL
4247 && !(info->executable
4248 && h->root.type == bfd_link_hash_undefined))
4249 {
4250 bfd_boolean fail = FALSE;
4251 bfd_boolean branch
4252 = ((r_type == R_X86_64_PC32
4253 || r_type == R_X86_64_PC32_BND)
4254 && is_32bit_relative_branch (contents, rel->r_offset));
4255
4256 if (SYMBOL_REFERENCES_LOCAL (info, h))
4257 {
4258 /* Symbol is referenced locally. Make sure it is
4259 defined locally or for a branch. */
4260 fail = !h->def_regular && !branch;
4261 }
4262 else if (!(info->executable
4263 && (h->needs_copy || eh->needs_copy)))
4264 {
4265 /* Symbol doesn't need copy reloc and isn't referenced
4266 locally. We only allow a branch to a symbol with
4267 non-default visibility. */
4268 fail = (!branch
4269 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4270 }
4271
4272 if (fail)
4273 {
4274 const char *fmt;
4275 const char *v;
4276 const char *pic = "";
4277
4278 switch (ELF_ST_VISIBILITY (h->other))
4279 {
4280 case STV_HIDDEN:
4281 v = _("hidden symbol");
4282 break;
4283 case STV_INTERNAL:
4284 v = _("internal symbol");
4285 break;
4286 case STV_PROTECTED:
4287 v = _("protected symbol");
4288 break;
4289 default:
4290 v = _("symbol");
4291 pic = _("; recompile with -fPIC");
4292 break;
4293 }
4294
4295 if (h->def_regular)
4296 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4297 else
4298 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4299
4300 (*_bfd_error_handler) (fmt, input_bfd,
4301 x86_64_elf_howto_table[r_type].name,
4302 v, h->root.root.string, pic);
4303 bfd_set_error (bfd_error_bad_value);
4304 return FALSE;
4305 }
4306 }
4307 /* Fall through. */
4308
4309 case R_X86_64_8:
4310 case R_X86_64_16:
4311 case R_X86_64_32:
4312 case R_X86_64_PC64:
4313 case R_X86_64_64:
4314 /* FIXME: The ABI says the linker should make sure the value is
4315 the same when it's zero-extended to 64 bits. */
4316
4317 direct:
4318 if ((input_section->flags & SEC_ALLOC) == 0)
4319 break;
4320
4321 /* Don't copy a pc-relative relocation into the output file
4322 if the symbol needs copy reloc or the symbol is undefined
4323 when building executable. */
4324 if ((info->shared
4325 && !(info->executable
4326 && h != NULL
4327 && (h->needs_copy
4328 || eh->needs_copy
4329 || h->root.type == bfd_link_hash_undefined)
4330 && IS_X86_64_PCREL_TYPE (r_type))
4331 && (h == NULL
4332 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4333 || h->root.type != bfd_link_hash_undefweak)
4334 && ((! IS_X86_64_PCREL_TYPE (r_type)
4335 && r_type != R_X86_64_SIZE32
4336 && r_type != R_X86_64_SIZE64)
4337 || ! SYMBOL_CALLS_LOCAL (info, h)))
4338 || (ELIMINATE_COPY_RELOCS
4339 && !info->shared
4340 && h != NULL
4341 && h->dynindx != -1
4342 && !h->non_got_ref
4343 && ((h->def_dynamic
4344 && !h->def_regular)
4345 || h->root.type == bfd_link_hash_undefweak
4346 || h->root.type == bfd_link_hash_undefined)))
4347 {
4348 Elf_Internal_Rela outrel;
4349 bfd_boolean skip, relocate;
4350 asection *sreloc;
4351
4352 /* When generating a shared object, these relocations
4353 are copied into the output file to be resolved at run
4354 time. */
4355 skip = FALSE;
4356 relocate = FALSE;
4357
4358 outrel.r_offset =
4359 _bfd_elf_section_offset (output_bfd, info, input_section,
4360 rel->r_offset);
4361 if (outrel.r_offset == (bfd_vma) -1)
4362 skip = TRUE;
4363 else if (outrel.r_offset == (bfd_vma) -2)
4364 skip = TRUE, relocate = TRUE;
4365
4366 outrel.r_offset += (input_section->output_section->vma
4367 + input_section->output_offset);
4368
4369 if (skip)
4370 memset (&outrel, 0, sizeof outrel);
4371
4372 /* h->dynindx may be -1 if this symbol was marked to
4373 become local. */
4374 else if (h != NULL
4375 && h->dynindx != -1
4376 && (IS_X86_64_PCREL_TYPE (r_type)
4377 || ! info->shared
4378 || ! SYMBOLIC_BIND (info, h)
4379 || ! h->def_regular))
4380 {
4381 outrel.r_info = htab->r_info (h->dynindx, r_type);
4382 outrel.r_addend = rel->r_addend;
4383 }
4384 else
4385 {
4386 /* This symbol is local, or marked to become local. */
4387 if (r_type == htab->pointer_r_type)
4388 {
4389 relocate = TRUE;
4390 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4391 outrel.r_addend = relocation + rel->r_addend;
4392 }
4393 else if (r_type == R_X86_64_64
4394 && !ABI_64_P (output_bfd))
4395 {
4396 relocate = TRUE;
4397 outrel.r_info = htab->r_info (0,
4398 R_X86_64_RELATIVE64);
4399 outrel.r_addend = relocation + rel->r_addend;
4400 /* Check addend overflow. */
4401 if ((outrel.r_addend & 0x80000000)
4402 != (rel->r_addend & 0x80000000))
4403 {
4404 const char *name;
4405 int addend = rel->r_addend;
4406 if (h && h->root.root.string)
4407 name = h->root.root.string;
4408 else
4409 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4410 sym, NULL);
4411 if (addend < 0)
4412 (*_bfd_error_handler)
4413 (_("%B: addend -0x%x in relocation %s against "
4414 "symbol `%s' at 0x%lx in section `%A' is "
4415 "out of range"),
4416 input_bfd, input_section, addend,
4417 x86_64_elf_howto_table[r_type].name,
4418 name, (unsigned long) rel->r_offset);
4419 else
4420 (*_bfd_error_handler)
4421 (_("%B: addend 0x%x in relocation %s against "
4422 "symbol `%s' at 0x%lx in section `%A' is "
4423 "out of range"),
4424 input_bfd, input_section, addend,
4425 x86_64_elf_howto_table[r_type].name,
4426 name, (unsigned long) rel->r_offset);
4427 bfd_set_error (bfd_error_bad_value);
4428 return FALSE;
4429 }
4430 }
4431 else
4432 {
4433 long sindx;
4434
4435 if (bfd_is_abs_section (sec))
4436 sindx = 0;
4437 else if (sec == NULL || sec->owner == NULL)
4438 {
4439 bfd_set_error (bfd_error_bad_value);
4440 return FALSE;
4441 }
4442 else
4443 {
4444 asection *osec;
4445
4446 /* We are turning this relocation into one
4447 against a section symbol. It would be
4448 proper to subtract the symbol's value,
4449 osec->vma, from the emitted reloc addend,
4450 but ld.so expects buggy relocs. */
4451 osec = sec->output_section;
4452 sindx = elf_section_data (osec)->dynindx;
4453 if (sindx == 0)
4454 {
4455 asection *oi = htab->elf.text_index_section;
4456 sindx = elf_section_data (oi)->dynindx;
4457 }
4458 BFD_ASSERT (sindx != 0);
4459 }
4460
4461 outrel.r_info = htab->r_info (sindx, r_type);
4462 outrel.r_addend = relocation + rel->r_addend;
4463 }
4464 }
4465
4466 sreloc = elf_section_data (input_section)->sreloc;
4467
4468 if (sreloc == NULL || sreloc->contents == NULL)
4469 {
4470 r = bfd_reloc_notsupported;
4471 goto check_relocation_error;
4472 }
4473
4474 elf_append_rela (output_bfd, sreloc, &outrel);
4475
4476 /* If this reloc is against an external symbol, we do
4477 not want to fiddle with the addend. Otherwise, we
4478 need to include the symbol value so that it becomes
4479 an addend for the dynamic reloc. */
4480 if (! relocate)
4481 continue;
4482 }
4483
4484 break;
4485
4486 case R_X86_64_TLSGD:
4487 case R_X86_64_GOTPC32_TLSDESC:
4488 case R_X86_64_TLSDESC_CALL:
4489 case R_X86_64_GOTTPOFF:
4490 tls_type = GOT_UNKNOWN;
4491 if (h == NULL && local_got_offsets)
4492 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4493 else if (h != NULL)
4494 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4495
4496 if (! elf_x86_64_tls_transition (info, input_bfd,
4497 input_section, contents,
4498 symtab_hdr, sym_hashes,
4499 &r_type, tls_type, rel,
4500 relend, h, r_symndx))
4501 return FALSE;
4502
4503 if (r_type == R_X86_64_TPOFF32)
4504 {
4505 bfd_vma roff = rel->r_offset;
4506
4507 BFD_ASSERT (! unresolved_reloc);
4508
4509 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4510 {
4511 /* GD->LE transition. For 64bit, change
4512 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4513 .word 0x6666; rex64; call __tls_get_addr
4514 into:
4515 movq %fs:0, %rax
4516 leaq foo@tpoff(%rax), %rax
4517 For 32bit, change
4518 leaq foo@tlsgd(%rip), %rdi
4519 .word 0x6666; rex64; call __tls_get_addr
4520 into:
4521 movl %fs:0, %eax
4522 leaq foo@tpoff(%rax), %rax
4523 For largepic, change:
4524 leaq foo@tlsgd(%rip), %rdi
4525 movabsq $__tls_get_addr@pltoff, %rax
4526 addq %rbx, %rax
4527 call *%rax
4528 into:
4529 movq %fs:0, %rax
4530 leaq foo@tpoff(%rax), %rax
4531 nopw 0x0(%rax,%rax,1) */
4532 int largepic = 0;
4533 if (ABI_64_P (output_bfd)
4534 && contents[roff + 5] == (bfd_byte) '\xb8')
4535 {
4536 memcpy (contents + roff - 3,
4537 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4538 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4539 largepic = 1;
4540 }
4541 else if (ABI_64_P (output_bfd))
4542 memcpy (contents + roff - 4,
4543 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4544 16);
4545 else
4546 memcpy (contents + roff - 3,
4547 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4548 15);
4549 bfd_put_32 (output_bfd,
4550 elf_x86_64_tpoff (info, relocation),
4551 contents + roff + 8 + largepic);
4552 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4553 rel++;
4554 continue;
4555 }
4556 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4557 {
4558 /* GDesc -> LE transition.
4559 It's originally something like:
4560 leaq x@tlsdesc(%rip), %rax
4561
4562 Change it to:
4563 movl $x@tpoff, %rax. */
4564
4565 unsigned int val, type;
4566
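/* The original instruction is REX.W leaq x@tlsdesc(%rip), %reg: the REX
   byte is at roff - 3, the 0x8d opcode at roff - 2 and the ModRM byte at
   roff - 1.  Its REX.R bit becomes REX.B of the new instruction, and the
   destination register from the ModRM reg field moves into the r/m field
   of the new `movq $imm32, %reg' (opcode 0xc7).  */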
4567 type = bfd_get_8 (input_bfd, contents + roff - 3);
4568 val = bfd_get_8 (input_bfd, contents + roff - 1);
4569 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4570 contents + roff - 3);
4571 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4572 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4573 contents + roff - 1);
4574 bfd_put_32 (output_bfd,
4575 elf_x86_64_tpoff (info, relocation),
4576 contents + roff);
4577 continue;
4578 }
4579 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4580 {
4581 /* GDesc -> LE transition.
4582 It's originally:
4583 call *(%rax)
4584 Turn it into:
4585 xchg %ax,%ax. */
4586 bfd_put_8 (output_bfd, 0x66, contents + roff);
4587 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4588 continue;
4589 }
4590 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4591 {
4592 /* IE->LE transition:
4593 For 64bit, originally it can be one of:
4594 movq foo@gottpoff(%rip), %reg
4595 addq foo@gottpoff(%rip), %reg
4596 We change it into:
4597 movq $foo, %reg
4598 leaq foo(%reg), %reg
4599 addq $foo, %reg.
4600 For 32bit, originally it can be one of:
4601 movq foo@gottpoff(%rip), %reg
4602 addl foo@gottpoff(%rip), %reg
4603 We change it into:
4604 movq $foo, %reg
4605 leal foo(%reg), %reg
4606 addl $foo, %reg. */
4607
4608 unsigned int val, type, reg;
4609
4610 if (roff >= 3)
4611 val = bfd_get_8 (input_bfd, contents + roff - 3);
4612 else
4613 val = 0;
4614 type = bfd_get_8 (input_bfd, contents + roff - 2);
4615 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4616 reg >>= 3;
4617 if (type == 0x8b)
4618 {
4619 /* movq */
4620 if (val == 0x4c)
4621 bfd_put_8 (output_bfd, 0x49,
4622 contents + roff - 3);
4623 else if (!ABI_64_P (output_bfd) && val == 0x44)
4624 bfd_put_8 (output_bfd, 0x41,
4625 contents + roff - 3);
4626 bfd_put_8 (output_bfd, 0xc7,
4627 contents + roff - 2);
4628 bfd_put_8 (output_bfd, 0xc0 | reg,
4629 contents + roff - 1);
4630 }
4631 else if (reg == 4)
4632 {
4633 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4634 is special */
4635 if (val == 0x4c)
4636 bfd_put_8 (output_bfd, 0x49,
4637 contents + roff - 3);
4638 else if (!ABI_64_P (output_bfd) && val == 0x44)
4639 bfd_put_8 (output_bfd, 0x41,
4640 contents + roff - 3);
4641 bfd_put_8 (output_bfd, 0x81,
4642 contents + roff - 2);
4643 bfd_put_8 (output_bfd, 0xc0 | reg,
4644 contents + roff - 1);
4645 }
4646 else
4647 {
4648 /* addq/addl -> leaq/leal */
4649 if (val == 0x4c)
4650 bfd_put_8 (output_bfd, 0x4d,
4651 contents + roff - 3);
4652 else if (!ABI_64_P (output_bfd) && val == 0x44)
4653 bfd_put_8 (output_bfd, 0x45,
4654 contents + roff - 3);
4655 bfd_put_8 (output_bfd, 0x8d,
4656 contents + roff - 2);
4657 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4658 contents + roff - 1);
4659 }
4660 bfd_put_32 (output_bfd,
4661 elf_x86_64_tpoff (info, relocation),
4662 contents + roff);
4663 continue;
4664 }
4665 else
4666 BFD_ASSERT (FALSE);
4667 }
4668
4669 if (htab->elf.sgot == NULL)
4670 abort ();
4671
4672 if (h != NULL)
4673 {
4674 off = h->got.offset;
4675 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4676 }
4677 else
4678 {
4679 if (local_got_offsets == NULL)
4680 abort ();
4681
4682 off = local_got_offsets[r_symndx];
4683 offplt = local_tlsdesc_gotents[r_symndx];
4684 }
4685
4686 if ((off & 1) != 0)
4687 off &= ~1;
4688 else
4689 {
4690 Elf_Internal_Rela outrel;
4691 int dr_type, indx;
4692 asection *sreloc;
4693
4694 if (htab->elf.srelgot == NULL)
4695 abort ();
4696
4697 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4698
4699 if (GOT_TLS_GDESC_P (tls_type))
4700 {
4701 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4702 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4703 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4704 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4705 + htab->elf.sgotplt->output_offset
4706 + offplt
4707 + htab->sgotplt_jump_table_size);
4708 sreloc = htab->elf.srelplt;
4709 if (indx == 0)
4710 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4711 else
4712 outrel.r_addend = 0;
4713 elf_append_rela (output_bfd, sreloc, &outrel);
4714 }
4715
4716 sreloc = htab->elf.srelgot;
4717
4718 outrel.r_offset = (htab->elf.sgot->output_section->vma
4719 + htab->elf.sgot->output_offset + off);
4720
4721 if (GOT_TLS_GD_P (tls_type))
4722 dr_type = R_X86_64_DTPMOD64;
4723 else if (GOT_TLS_GDESC_P (tls_type))
4724 goto dr_done;
4725 else
4726 dr_type = R_X86_64_TPOFF64;
4727
4728 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4729 outrel.r_addend = 0;
4730 if ((dr_type == R_X86_64_TPOFF64
4731 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4732 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4733 outrel.r_info = htab->r_info (indx, dr_type);
4734
4735 elf_append_rela (output_bfd, sreloc, &outrel);
4736
4737 if (GOT_TLS_GD_P (tls_type))
4738 {
4739 if (indx == 0)
4740 {
4741 BFD_ASSERT (! unresolved_reloc);
4742 bfd_put_64 (output_bfd,
4743 relocation - elf_x86_64_dtpoff_base (info),
4744 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4745 }
4746 else
4747 {
4748 bfd_put_64 (output_bfd, 0,
4749 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4750 outrel.r_info = htab->r_info (indx,
4751 R_X86_64_DTPOFF64);
4752 outrel.r_offset += GOT_ENTRY_SIZE;
4753 elf_append_rela (output_bfd, sreloc,
4754 &outrel);
4755 }
4756 }
4757
4758 dr_done:
4759 if (h != NULL)
4760 h->got.offset |= 1;
4761 else
4762 local_got_offsets[r_symndx] |= 1;
4763 }
4764
4765 if (off >= (bfd_vma) -2
4766 && ! GOT_TLS_GDESC_P (tls_type))
4767 abort ();
4768 if (r_type == ELF32_R_TYPE (rel->r_info))
4769 {
4770 if (r_type == R_X86_64_GOTPC32_TLSDESC
4771 || r_type == R_X86_64_TLSDESC_CALL)
4772 relocation = htab->elf.sgotplt->output_section->vma
4773 + htab->elf.sgotplt->output_offset
4774 + offplt + htab->sgotplt_jump_table_size;
4775 else
4776 relocation = htab->elf.sgot->output_section->vma
4777 + htab->elf.sgot->output_offset + off;
4778 unresolved_reloc = FALSE;
4779 }
4780 else
4781 {
4782 bfd_vma roff = rel->r_offset;
4783
4784 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4785 {
4786 /* GD->IE transition. For 64bit, change
4787 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4788 .word 0x6666; rex64; call __tls_get_addr@plt
4789 into:
4790 movq %fs:0, %rax
4791 addq foo@gottpoff(%rip), %rax
4792 For 32bit, change
4793 leaq foo@tlsgd(%rip), %rdi
4794 .word 0x6666; rex64; call __tls_get_addr@plt
4795 into:
4796 movl %fs:0, %eax
4797 addq foo@gottpoff(%rip), %rax
4798 For largepic, change:
4799 leaq foo@tlsgd(%rip), %rdi
4800 movabsq $__tls_get_addr@pltoff, %rax
4801 addq %rbx, %rax
4802 call *%rax
4803 into:
4804 movq %fs:0, %rax
4805 addq foo@gottpoff(%rip), %rax
4806 nopw 0x0(%rax,%rax,1) */
4807 int largepic = 0;
4808 if (ABI_64_P (output_bfd)
4809 && contents[roff + 5] == (bfd_byte) '\xb8')
4810 {
4811 memcpy (contents + roff - 3,
4812 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4813 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4814 largepic = 1;
4815 }
4816 else if (ABI_64_P (output_bfd))
4817 memcpy (contents + roff - 4,
4818 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4819 16);
4820 else
4821 memcpy (contents + roff - 3,
4822 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4823 15);
4824
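/* The rewritten `addq foo@gottpoff(%rip), %rax' ends 12 (+ largepic) bytes
   after roff and its rip-relative displacement field starts 8 (+ largepic)
   bytes after roff, hence the adjustments below.  */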
4825 relocation = (htab->elf.sgot->output_section->vma
4826 + htab->elf.sgot->output_offset + off
4827 - roff
4828 - largepic
4829 - input_section->output_section->vma
4830 - input_section->output_offset
4831 - 12);
4832 bfd_put_32 (output_bfd, relocation,
4833 contents + roff + 8 + largepic);
4834 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4835 rel++;
4836 continue;
4837 }
4838 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4839 {
4840 /* GDesc -> IE transition.
4841 It's originally something like:
4842 leaq x@tlsdesc(%rip), %rax
4843
4844 Change it to:
4845 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4846
4847 /* Now modify the instruction as appropriate. To
4848 turn a leaq into a movq in the form we use it, it
4849 suffices to change the second byte from 0x8d to
4850 0x8b. */
4851 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4852
4853 bfd_put_32 (output_bfd,
4854 htab->elf.sgot->output_section->vma
4855 + htab->elf.sgot->output_offset + off
4856 - rel->r_offset
4857 - input_section->output_section->vma
4858 - input_section->output_offset
4859 - 4,
4860 contents + roff);
4861 continue;
4862 }
4863 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4864 {
4865 /* GDesc -> IE transition.
4866 It's originally:
4867 call *(%rax)
4868
4869 Change it to:
4870 xchg %ax, %ax. */
4871
4872 bfd_put_8 (output_bfd, 0x66, contents + roff);
4873 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4874 continue;
4875 }
4876 else
4877 BFD_ASSERT (FALSE);
4878 }
4879 break;
4880
4881 case R_X86_64_TLSLD:
4882 if (! elf_x86_64_tls_transition (info, input_bfd,
4883 input_section, contents,
4884 symtab_hdr, sym_hashes,
4885 &r_type, GOT_UNKNOWN,
4886 rel, relend, h, r_symndx))
4887 return FALSE;
4888
4889 if (r_type != R_X86_64_TLSLD)
4890 {
4891 /* LD->LE transition:
4892 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4893 For 64bit, we change it into:
4894 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4895 For 32bit, we change it into:
4896 nopl 0x0(%rax); movl %fs:0, %eax.
4897 For largepic, change:
4898 leaq foo@tlsld(%rip), %rdi
4899 movabsq $__tls_get_addr@pltoff, %rax
4900 addq %rbx, %rax
4901 call *%rax
4902 into:
4903 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4904 movq %fs:0, %rax */
4905
4906 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4907 if (ABI_64_P (output_bfd)
4908 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4909 memcpy (contents + rel->r_offset - 3,
4910 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4911 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4912 else if (ABI_64_P (output_bfd))
4913 memcpy (contents + rel->r_offset - 3,
4914 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4915 else
4916 memcpy (contents + rel->r_offset - 3,
4917 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4918 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4919 rel++;
4920 continue;
4921 }
4922
4923 if (htab->elf.sgot == NULL)
4924 abort ();
4925
4926 off = htab->tls_ld_got.offset;
4927 if (off & 1)
4928 off &= ~1;
4929 else
4930 {
4931 Elf_Internal_Rela outrel;
4932
4933 if (htab->elf.srelgot == NULL)
4934 abort ();
4935
4936 outrel.r_offset = (htab->elf.sgot->output_section->vma
4937 + htab->elf.sgot->output_offset + off);
4938
4939 bfd_put_64 (output_bfd, 0,
4940 htab->elf.sgot->contents + off);
4941 bfd_put_64 (output_bfd, 0,
4942 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4943 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4944 outrel.r_addend = 0;
4945 elf_append_rela (output_bfd, htab->elf.srelgot,
4946 &outrel);
4947 htab->tls_ld_got.offset |= 1;
4948 }
4949 relocation = htab->elf.sgot->output_section->vma
4950 + htab->elf.sgot->output_offset + off;
4951 unresolved_reloc = FALSE;
4952 break;
4953
4954 case R_X86_64_DTPOFF32:
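/* In an executable, a @dtpoff reference in a code section goes with an
   LD->LE-relaxed sequence, so the TP-relative offset is wanted instead;
   in other sections (e.g. debug info) the DTP-relative offset is kept.  */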
4955 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4956 relocation -= elf_x86_64_dtpoff_base (info);
4957 else
4958 relocation = elf_x86_64_tpoff (info, relocation);
4959 break;
4960
4961 case R_X86_64_TPOFF32:
4962 case R_X86_64_TPOFF64:
4963 BFD_ASSERT (info->executable);
4964 relocation = elf_x86_64_tpoff (info, relocation);
4965 break;
4966
4967 case R_X86_64_DTPOFF64:
4968 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4969 relocation -= elf_x86_64_dtpoff_base (info);
4970 break;
4971
4972 default:
4973 break;
4974 }
4975
4976 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4977 because such sections are not SEC_ALLOC and thus ld.so will
4978 not process them. */
4979 if (unresolved_reloc
4980 && !((input_section->flags & SEC_DEBUGGING) != 0
4981 && h->def_dynamic)
4982 && _bfd_elf_section_offset (output_bfd, info, input_section,
4983 rel->r_offset) != (bfd_vma) -1)
4984 {
4985 (*_bfd_error_handler)
4986 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4987 input_bfd,
4988 input_section,
4989 (long) rel->r_offset,
4990 howto->name,
4991 h->root.root.string);
4992 return FALSE;
4993 }
4994
4995 do_relocation:
4996 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4997 contents, rel->r_offset,
4998 relocation, rel->r_addend);
4999
5000 check_relocation_error:
5001 if (r != bfd_reloc_ok)
5002 {
5003 const char *name;
5004
5005 if (h != NULL)
5006 name = h->root.root.string;
5007 else
5008 {
5009 name = bfd_elf_string_from_elf_section (input_bfd,
5010 symtab_hdr->sh_link,
5011 sym->st_name);
5012 if (name == NULL)
5013 return FALSE;
5014 if (*name == '\0')
5015 name = bfd_section_name (input_bfd, sec);
5016 }
5017
5018 if (r == bfd_reloc_overflow)
5019 {
5020 if (! ((*info->callbacks->reloc_overflow)
5021 (info, (h ? &h->root : NULL), name, howto->name,
5022 (bfd_vma) 0, input_bfd, input_section,
5023 rel->r_offset)))
5024 return FALSE;
5025 }
5026 else
5027 {
5028 (*_bfd_error_handler)
5029 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5030 input_bfd, input_section,
5031 (long) rel->r_offset, name, (int) r);
5032 return FALSE;
5033 }
5034 }
5035 }
5036
5037 return TRUE;
5038 }
5039
5040 /* Finish up dynamic symbol handling. We set the contents of various
5041 dynamic sections here. */
5042
5043 static bfd_boolean
5044 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5045 struct bfd_link_info *info,
5046 struct elf_link_hash_entry *h,
5047 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
5048 {
5049 struct elf_x86_64_link_hash_table *htab;
5050 const struct elf_x86_64_backend_data *abed;
5051 bfd_boolean use_plt_bnd;
5052 struct elf_x86_64_link_hash_entry *eh;
5053
5054 htab = elf_x86_64_hash_table (info);
5055 if (htab == NULL)
5056 return FALSE;
5057
5058 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5059 section only if there is a .plt section. */
5060 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5061 abed = (use_plt_bnd
5062 ? &elf_x86_64_bnd_arch_bed
5063 : get_elf_x86_64_backend_data (output_bfd));
5064
5065 eh = (struct elf_x86_64_link_hash_entry *) h;
5066
5067 if (h->plt.offset != (bfd_vma) -1)
5068 {
5069 bfd_vma plt_index;
5070 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5071 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5072 Elf_Internal_Rela rela;
5073 bfd_byte *loc;
5074 asection *plt, *gotplt, *relplt, *resolved_plt;
5075 const struct elf_backend_data *bed;
5076 bfd_vma plt_got_pcrel_offset;
5077
5078 /* When building a static executable, use .iplt, .igot.plt and
5079 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5080 if (htab->elf.splt != NULL)
5081 {
5082 plt = htab->elf.splt;
5083 gotplt = htab->elf.sgotplt;
5084 relplt = htab->elf.srelplt;
5085 }
5086 else
5087 {
5088 plt = htab->elf.iplt;
5089 gotplt = htab->elf.igotplt;
5090 relplt = htab->elf.irelplt;
5091 }
5092
5093 /* This symbol has an entry in the procedure linkage table. Set
5094 it up. */
5095 if ((h->dynindx == -1
5096 && !((h->forced_local || info->executable)
5097 && h->def_regular
5098 && h->type == STT_GNU_IFUNC))
5099 || plt == NULL
5100 || gotplt == NULL
5101 || relplt == NULL)
5102 abort ();
5103
5104 /* Get the index in the procedure linkage table which
5105 corresponds to this symbol. This is the index of this symbol
5106 in all the symbols for which we are making plt entries. The
5107 first entry in the procedure linkage table is reserved.
5108
5109 Get the offset into the .got table of the entry that
5110 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5111 bytes. The first three are reserved for the dynamic linker.
5112
5113 For static executables, we don't reserve anything. */
5114
5115 if (plt == htab->elf.splt)
5116 {
5117 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5118 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5119 }
5120 else
5121 {
5122 got_offset = h->plt.offset / abed->plt_entry_size;
5123 got_offset = got_offset * GOT_ENTRY_SIZE;
5124 }
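/* Illustrative numbers only: with the default 16-byte PLT entries, the
   symbol at h->plt.offset 0x20 is the second real entry (index 1, the
   first entry being reserved), so its .got.plt slot is at offset
   (1 + 3) * GOT_ENTRY_SIZE = 0x20, past the three reserved slots.  */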
5125
5126 plt_plt_insn_end = abed->plt_plt_insn_end;
5127 plt_plt_offset = abed->plt_plt_offset;
5128 plt_got_insn_size = abed->plt_got_insn_size;
5129 plt_got_offset = abed->plt_got_offset;
5130 if (use_plt_bnd)
5131 {
5132 /* Use the second PLT with BND relocations. */
5133 const bfd_byte *plt_entry, *plt2_entry;
5134
5135 if (eh->has_bnd_reloc)
5136 {
5137 plt_entry = elf_x86_64_bnd_plt_entry;
5138 plt2_entry = elf_x86_64_bnd_plt2_entry;
5139 }
5140 else
5141 {
5142 plt_entry = elf_x86_64_legacy_plt_entry;
5143 plt2_entry = elf_x86_64_legacy_plt2_entry;
5144
5145 /* Subtract 1 since there is no BND prefix. */
5146 plt_plt_insn_end -= 1;
5147 plt_plt_offset -= 1;
5148 plt_got_insn_size -= 1;
5149 plt_got_offset -= 1;
5150 }
5151
5152 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5153 == sizeof (elf_x86_64_legacy_plt_entry));
5154
5155 /* Fill in the entry in the procedure linkage table. */
5156 memcpy (plt->contents + h->plt.offset,
5157 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5158 /* Fill in the entry in the second PLT. */
5159 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5160 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5161
5162 resolved_plt = htab->plt_bnd;
5163 plt_offset = eh->plt_bnd.offset;
5164 }
5165 else
5166 {
5167 /* Fill in the entry in the procedure linkage table. */
5168 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5169 abed->plt_entry_size);
5170
5171 resolved_plt = plt;
5172 plt_offset = h->plt.offset;
5173 }
5174
5175 /* Insert the relocation positions of the plt section. */
5176
5177 /* Store the PC-relative offset to the GOT entry for the instruction
5178 referring to it, subtracting that instruction's size (rip-relative
addressing is from the end of the instruction). */
5179 plt_got_pcrel_offset = (gotplt->output_section->vma
5180 + gotplt->output_offset
5181 + got_offset
5182 - resolved_plt->output_section->vma
5183 - resolved_plt->output_offset
5184 - plt_offset
5185 - plt_got_insn_size);
5186
5187 /* Check PC-relative offset overflow in PLT entry. */
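/* Adding 0x80000000 maps every displacement that fits in a signed 32-bit
   field onto [0, 0xffffffff], so any larger result means overflow.  */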
5188 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5189 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5190 output_bfd, h->root.root.string);
5191
5192 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5193 resolved_plt->contents + plt_offset + plt_got_offset);
5194
5195 /* Fill in the entry in the global offset table, initially this
5196 points to the second part of the PLT entry. */
5197 bfd_put_64 (output_bfd, (plt->output_section->vma
5198 + plt->output_offset
5199 + h->plt.offset + abed->plt_lazy_offset),
5200 gotplt->contents + got_offset);
5201
5202 /* Fill in the entry in the .rela.plt section. */
5203 rela.r_offset = (gotplt->output_section->vma
5204 + gotplt->output_offset
5205 + got_offset);
5206 if (h->dynindx == -1
5207 || ((info->executable
5208 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5209 && h->def_regular
5210 && h->type == STT_GNU_IFUNC))
5211 {
5212 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5213 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5214 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5215 rela.r_addend = (h->root.u.def.value
5216 + h->root.u.def.section->output_section->vma
5217 + h->root.u.def.section->output_offset);
5218 /* R_X86_64_IRELATIVE comes last. */
5219 plt_index = htab->next_irelative_index--;
5220 }
5221 else
5222 {
5223 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5224 rela.r_addend = 0;
5225 plt_index = htab->next_jump_slot_index++;
5226 }
5227
5228 /* Don't fill PLT entry for static executables. */
5229 if (plt == htab->elf.splt)
5230 {
5231 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5232
5233 /* Put relocation index. */
5234 bfd_put_32 (output_bfd, plt_index,
5235 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5236
5237 /* Put offset for jmp .PLT0 and check for overflow. We don't
5238 check relocation index for overflow since branch displacement
5239 will overflow first. */
5240 if (plt0_offset > 0x80000000)
5241 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5242 output_bfd, h->root.root.string);
5243 bfd_put_32 (output_bfd, - plt0_offset,
5244 plt->contents + h->plt.offset + plt_plt_offset);
5245 }
5246
5247 bed = get_elf_backend_data (output_bfd);
5248 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5249 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5250 }
5251 else if (eh->plt_got.offset != (bfd_vma) -1)
5252 {
5253 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5254 asection *plt, *got;
5255 bfd_boolean got_after_plt;
5256 int32_t got_pcrel_offset;
5257 const bfd_byte *got_plt_entry;
5258
5259 /* Set the entry in the GOT procedure linkage table. */
5260 plt = htab->plt_got;
5261 got = htab->elf.sgot;
5262 got_offset = h->got.offset;
5263
5264 if (got_offset == (bfd_vma) -1
5265 || h->type == STT_GNU_IFUNC
5266 || plt == NULL
5267 || got == NULL)
5268 abort ();
5269
5270 /* Use the second PLT entry template for the GOT PLT since they
5271 are identical. */
5272 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5273 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5274 if (eh->has_bnd_reloc)
5275 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5276 else
5277 {
5278 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5279
5280 /* Subtract 1 since there is no BND prefix. */
5281 plt_got_insn_size -= 1;
5282 plt_got_offset -= 1;
5283 }
5284
5285 /* Fill in the entry in the GOT procedure linkage table. */
5286 plt_offset = eh->plt_got.offset;
5287 memcpy (plt->contents + plt_offset,
5288 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5289
5290 /* Store the PC-relative offset to the GOT entry for the instruction
5291 referring to it, subtracting that instruction's size (rip-relative
addressing is from the end of the instruction). */
5292 got_pcrel_offset = (got->output_section->vma
5293 + got->output_offset
5294 + got_offset
5295 - plt->output_section->vma
5296 - plt->output_offset
5297 - plt_offset
5298 - plt_got_insn_size);
5299
5300 /* Check PC-relative offset overflow in GOT PLT entry. */
5301 got_after_plt = got->output_section->vma > plt->output_section->vma;
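/* got_pcrel_offset is only 32 bits wide, so a sign that contradicts the
   relative placement of .got and the PLT means the real offset did not
   fit.  */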
5302 if ((got_after_plt && got_pcrel_offset < 0)
5303 || (!got_after_plt && got_pcrel_offset > 0))
5304 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5305 output_bfd, h->root.root.string);
5306
5307 bfd_put_32 (output_bfd, got_pcrel_offset,
5308 plt->contents + plt_offset + plt_got_offset);
5309 }
5310
5311 if (!h->def_regular
5312 && (h->plt.offset != (bfd_vma) -1
5313 || eh->plt_got.offset != (bfd_vma) -1))
5314 {
5315 /* Mark the symbol as undefined, rather than as defined in
5316 the .plt section. Leave the value if there were any
5317 relocations where pointer equality matters (this is a clue
5318 for the dynamic linker, to make function pointer
5319 comparisons work between an application and a shared
5320 library); otherwise set it to zero. If a function is only
5321 called from a binary, there is no need to slow down
5322 shared libraries because of that. */
5323 sym->st_shndx = SHN_UNDEF;
5324 if (!h->pointer_equality_needed)
5325 sym->st_value = 0;
5326 }
5327
5328 if (h->got.offset != (bfd_vma) -1
5329 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5330 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5331 {
5332 Elf_Internal_Rela rela;
5333
5334 /* This symbol has an entry in the global offset table. Set it
5335 up. */
5336 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5337 abort ();
5338
5339 rela.r_offset = (htab->elf.sgot->output_section->vma
5340 + htab->elf.sgot->output_offset
5341 + (h->got.offset &~ (bfd_vma) 1));
5342
5343 /* If this is a static link, or it is a -Bsymbolic link and the
5344 symbol is defined locally or was forced to be local because
5345 of a version file, we just want to emit a RELATIVE reloc.
5346 The entry in the global offset table will already have been
5347 initialized in the relocate_section function. */
5348 if (h->def_regular
5349 && h->type == STT_GNU_IFUNC)
5350 {
5351 if (info->shared)
5352 {
5353 /* Generate R_X86_64_GLOB_DAT. */
5354 goto do_glob_dat;
5355 }
5356 else
5357 {
5358 asection *plt;
5359
5360 if (!h->pointer_equality_needed)
5361 abort ();
5362
5363 /* For a non-shared object, we can't use .got.plt, which
5364 contains the real function address, if we need pointer
5365 equality. We load the GOT entry with the PLT entry. */
5366 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5367 bfd_put_64 (output_bfd, (plt->output_section->vma
5368 + plt->output_offset
5369 + h->plt.offset),
5370 htab->elf.sgot->contents + h->got.offset);
5371 return TRUE;
5372 }
5373 }
5374 else if (info->shared
5375 && SYMBOL_REFERENCES_LOCAL (info, h))
5376 {
5377 if (!h->def_regular)
5378 return FALSE;
5379 BFD_ASSERT((h->got.offset & 1) != 0);
5380 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5381 rela.r_addend = (h->root.u.def.value
5382 + h->root.u.def.section->output_section->vma
5383 + h->root.u.def.section->output_offset);
5384 }
5385 else
5386 {
5387 BFD_ASSERT((h->got.offset & 1) == 0);
5388 do_glob_dat:
5389 bfd_put_64 (output_bfd, (bfd_vma) 0,
5390 htab->elf.sgot->contents + h->got.offset);
5391 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5392 rela.r_addend = 0;
5393 }
5394
5395 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5396 }
5397
5398 if (h->needs_copy)
5399 {
5400 Elf_Internal_Rela rela;
5401
5402 /* This symbol needs a copy reloc. Set it up. */
5403
5404 if (h->dynindx == -1
5405 || (h->root.type != bfd_link_hash_defined
5406 && h->root.type != bfd_link_hash_defweak)
5407 || htab->srelbss == NULL)
5408 abort ();
5409
5410 rela.r_offset = (h->root.u.def.value
5411 + h->root.u.def.section->output_section->vma
5412 + h->root.u.def.section->output_offset);
5413 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5414 rela.r_addend = 0;
5415 elf_append_rela (output_bfd, htab->srelbss, &rela);
5416 }
5417
5418 return TRUE;
5419 }
5420
5421 /* Finish up local dynamic symbol handling. We set the contents of
5422 various dynamic sections here. */
5423
5424 static bfd_boolean
5425 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5426 {
5427 struct elf_link_hash_entry *h
5428 = (struct elf_link_hash_entry *) *slot;
5429 struct bfd_link_info *info
5430 = (struct bfd_link_info *) inf;
5431
5432 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5433 info, h, NULL);
5434 }
5435
5436 /* Used to decide how to sort relocs in an optimal manner for the
5437 dynamic linker, before writing them out. */
5438
5439 static enum elf_reloc_type_class
5440 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5441 const asection *rel_sec ATTRIBUTE_UNUSED,
5442 const Elf_Internal_Rela *rela)
5443 {
5444 switch ((int) ELF32_R_TYPE (rela->r_info))
5445 {
5446 case R_X86_64_RELATIVE:
5447 case R_X86_64_RELATIVE64:
5448 return reloc_class_relative;
5449 case R_X86_64_JUMP_SLOT:
5450 return reloc_class_plt;
5451 case R_X86_64_COPY:
5452 return reloc_class_copy;
5453 default:
5454 return reloc_class_normal;
5455 }
5456 }
5457
5458 /* Finish up the dynamic sections. */
5459
5460 static bfd_boolean
5461 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5462 struct bfd_link_info *info)
5463 {
5464 struct elf_x86_64_link_hash_table *htab;
5465 bfd *dynobj;
5466 asection *sdyn;
5467 const struct elf_x86_64_backend_data *abed;
5468
5469 htab = elf_x86_64_hash_table (info);
5470 if (htab == NULL)
5471 return FALSE;
5472
5473 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5474 section only if there is a .plt section. */
5475 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5476 ? &elf_x86_64_bnd_arch_bed
5477 : get_elf_x86_64_backend_data (output_bfd));
5478
5479 dynobj = htab->elf.dynobj;
5480 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5481
5482 if (htab->elf.dynamic_sections_created)
5483 {
5484 bfd_byte *dyncon, *dynconend;
5485 const struct elf_backend_data *bed;
5486 bfd_size_type sizeof_dyn;
5487
5488 if (sdyn == NULL || htab->elf.sgot == NULL)
5489 abort ();
5490
5491 bed = get_elf_backend_data (dynobj);
5492 sizeof_dyn = bed->s->sizeof_dyn;
5493 dyncon = sdyn->contents;
5494 dynconend = sdyn->contents + sdyn->size;
5495 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5496 {
5497 Elf_Internal_Dyn dyn;
5498 asection *s;
5499
5500 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5501
5502 switch (dyn.d_tag)
5503 {
5504 default:
5505 continue;
5506
5507 case DT_PLTGOT:
5508 s = htab->elf.sgotplt;
5509 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5510 break;
5511
5512 case DT_JMPREL:
5513 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5514 break;
5515
5516 case DT_PLTRELSZ:
5517 s = htab->elf.srelplt->output_section;
5518 dyn.d_un.d_val = s->size;
5519 break;
5520
5521 case DT_RELASZ:
5522 /* The procedure linkage table relocs (DT_JMPREL) should
5523 not be included in the overall relocs (DT_RELA).
5524 Therefore, we override the DT_RELASZ entry here to
5525 make it not include the JMPREL relocs. Since the
5526 linker script arranges for .rela.plt to follow all
5527 other relocation sections, we don't have to worry
5528 about changing the DT_RELA entry. */
5529 if (htab->elf.srelplt != NULL)
5530 {
5531 s = htab->elf.srelplt->output_section;
5532 dyn.d_un.d_val -= s->size;
5533 }
5534 break;
5535
5536 case DT_TLSDESC_PLT:
5537 s = htab->elf.splt;
5538 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5539 + htab->tlsdesc_plt;
5540 break;
5541
5542 case DT_TLSDESC_GOT:
5543 s = htab->elf.sgot;
5544 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5545 + htab->tlsdesc_got;
5546 break;
5547 }
5548
5549 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5550 }
5551
5552 /* Fill in the special first entry in the procedure linkage table. */
5553 if (htab->elf.splt && htab->elf.splt->size > 0)
5554 {
5555 /* Fill in the first entry in the procedure linkage table. */
5556 memcpy (htab->elf.splt->contents,
5557 abed->plt0_entry, abed->plt_entry_size);
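/* PLT0 here is `pushq GOT+8(%rip); jmpq *GOT+16(%rip)' plus padding; the
   two 32-bit stores below patch the rip-relative displacements of those
   two instructions.  */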
5558 /* Add the offset for pushq GOT+8(%rip); the instruction is
5559 6 bytes long, so subtract 6 to make the value relative to its end. */
5560 bfd_put_32 (output_bfd,
5561 (htab->elf.sgotplt->output_section->vma
5562 + htab->elf.sgotplt->output_offset
5563 + 8
5564 - htab->elf.splt->output_section->vma
5565 - htab->elf.splt->output_offset
5566 - 6),
5567 htab->elf.splt->contents + abed->plt0_got1_offset);
5568 /* Add offset for the PC-relative instruction accessing GOT+16,
5569 subtracting the offset to the end of that instruction. */
5570 bfd_put_32 (output_bfd,
5571 (htab->elf.sgotplt->output_section->vma
5572 + htab->elf.sgotplt->output_offset
5573 + 16
5574 - htab->elf.splt->output_section->vma
5575 - htab->elf.splt->output_offset
5576 - abed->plt0_got2_insn_end),
5577 htab->elf.splt->contents + abed->plt0_got2_offset);
5578
5579 elf_section_data (htab->elf.splt->output_section)
5580 ->this_hdr.sh_entsize = abed->plt_entry_size;
5581
5582 if (htab->tlsdesc_plt)
5583 {
5584 bfd_put_64 (output_bfd, (bfd_vma) 0,
5585 htab->elf.sgot->contents + htab->tlsdesc_got);
5586
5587 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5588 abed->plt0_entry, abed->plt_entry_size);
5589
5590 /* Add the offset for pushq GOT+8(%rip); the instruction is
5591 6 bytes long, so subtract 6 to make the value relative to its end. */
5592 bfd_put_32 (output_bfd,
5593 (htab->elf.sgotplt->output_section->vma
5594 + htab->elf.sgotplt->output_offset
5595 + 8
5596 - htab->elf.splt->output_section->vma
5597 - htab->elf.splt->output_offset
5598 - htab->tlsdesc_plt
5599 - 6),
5600 htab->elf.splt->contents
5601 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5602 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5603 where TDG stands for htab->tlsdesc_got, subtracting the offset
5604 to the end of that instruction. */
5605 bfd_put_32 (output_bfd,
5606 (htab->elf.sgot->output_section->vma
5607 + htab->elf.sgot->output_offset
5608 + htab->tlsdesc_got
5609 - htab->elf.splt->output_section->vma
5610 - htab->elf.splt->output_offset
5611 - htab->tlsdesc_plt
5612 - abed->plt0_got2_insn_end),
5613 htab->elf.splt->contents
5614 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5615 }
5616 }
5617 }
5618
5619 if (htab->plt_bnd != NULL)
5620 elf_section_data (htab->plt_bnd->output_section)
5621 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5622
5623 if (htab->elf.sgotplt)
5624 {
5625 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5626 {
5627 (*_bfd_error_handler)
5628 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5629 return FALSE;
5630 }
5631
5632 /* Fill in the first three entries in the global offset table. */
5633 if (htab->elf.sgotplt->size > 0)
5634 {
5635 /* Set the first entry in the global offset table to the address of
5636 the dynamic section. */
5637 if (sdyn == NULL)
5638 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5639 else
5640 bfd_put_64 (output_bfd,
5641 sdyn->output_section->vma + sdyn->output_offset,
5642 htab->elf.sgotplt->contents);
5643 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
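/* At run time the dynamic linker conventionally fills GOT[1] with a
   pointer to its bookkeeping data (the link map) and GOT[2] with the
   address of its lazy-resolution entry point; the link editor only
   reserves the slots, so zero them here.  */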
5644 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5645 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5646 }
5647
5648 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5649 GOT_ENTRY_SIZE;
5650 }
5651
5652 /* Adjust .eh_frame for .plt section. */
5653 if (htab->plt_eh_frame != NULL
5654 && htab->plt_eh_frame->contents != NULL)
5655 {
5656 if (htab->elf.splt != NULL
5657 && htab->elf.splt->size != 0
5658 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5659 && htab->elf.splt->output_section != NULL
5660 && htab->plt_eh_frame->output_section != NULL)
5661 {
5662 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5663 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5664 + htab->plt_eh_frame->output_offset
5665 + PLT_FDE_START_OFFSET;
5666 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5667 htab->plt_eh_frame->contents
5668 + PLT_FDE_START_OFFSET);
5669 }
5670 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5671 {
5672 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5673 htab->plt_eh_frame,
5674 htab->plt_eh_frame->contents))
5675 return FALSE;
5676 }
5677 }
5678
5679 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5680 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5681 = GOT_ENTRY_SIZE;
5682
5683 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5684 htab_traverse (htab->loc_hash_table,
5685 elf_x86_64_finish_local_dynamic_symbol,
5686 info);
5687
5688 return TRUE;
5689 }
5690
5691 /* Return an array of PLT entry symbol values. */
5692
5693 static bfd_vma *
5694 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5695 asection *relplt)
5696 {
5697 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5698 arelent *p;
5699 long count, i;
5700 bfd_vma *plt_sym_val;
5701 bfd_vma plt_offset;
5702 bfd_byte *plt_contents;
5703 const struct elf_x86_64_backend_data *bed;
5704 Elf_Internal_Shdr *hdr;
5705 asection *plt_bnd;
5706
5707 /* Get the .plt section contents. The PLT passed down may point to
5708 the .plt.bnd section; make sure that PLT always points to the .plt
5709 section. */
5710 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5711 if (plt_bnd)
5712 {
5713 if (plt != plt_bnd)
5714 abort ();
5715 plt = bfd_get_section_by_name (abfd, ".plt");
5716 if (plt == NULL)
5717 abort ();
5718 bed = &elf_x86_64_bnd_arch_bed;
5719 }
5720 else
5721 bed = get_elf_x86_64_backend_data (abfd);
5722
5723 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5724 if (plt_contents == NULL)
5725 return NULL;
5726 if (!bfd_get_section_contents (abfd, (asection *) plt,
5727 plt_contents, 0, plt->size))
5728 {
5729 bad_return:
5730 free (plt_contents);
5731 return NULL;
5732 }
5733
5734 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5735 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5736 goto bad_return;
5737
5738 hdr = &elf_section_data (relplt)->this_hdr;
5739 count = relplt->size / hdr->sh_entsize;
5740
5741 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5742 if (plt_sym_val == NULL)
5743 goto bad_return;
5744
5745 for (i = 0; i < count; i++)
5746 plt_sym_val[i] = -1;
5747
5748 plt_offset = bed->plt_entry_size;
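/* plt_offset starts just past the PLT0 header.  Each following entry
   embeds its relocation index as a pushq immediate located
   bed->plt_reloc_offset bytes into the entry, which is what the
   H_GET_32 below extracts.  */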
5749 p = relplt->relocation;
5750 for (i = 0; i < count; i++, p++)
5751 {
5752 long reloc_index;
5753
5754 /* Skip unknown relocations. */
5755 if (p->howto == NULL)
5756 continue;
5757
5758 if (p->howto->type != R_X86_64_JUMP_SLOT
5759 && p->howto->type != R_X86_64_IRELATIVE)
5760 continue;
5761
5762 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5763 + bed->plt_reloc_offset));
5764 if (reloc_index >= count)
5765 abort ();
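/* Branches to the function go through the companion stub in .plt.bnd
   (an indirect jump through the GOT slot), so the synthetic @plt
   symbol is placed there.  plt_index starts at 1 because plt_offset
   skipped PLT0, and .plt.bnd has no header entry, hence the
   (plt_index - 1) scaling by the .plt.bnd stub size.  */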
5766 if (plt_bnd)
5767 {
5768 /* This is the index into the .plt section. */
5769 long plt_index = plt_offset / bed->plt_entry_size;
5770 /* Store the VMA plus the offset within the .plt.bnd section. */
5771 plt_sym_val[reloc_index] =
5772 (plt_bnd->vma
5773 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5774 }
5775 else
5776 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5777 plt_offset += bed->plt_entry_size;
5778
5779 /* PR binutils/18437: Skip extra relocations in the .rela.plt
5780 section. */
5781 if (plt_offset >= plt->size)
5782 break;
5783 }
5784
5785 free (plt_contents);
5786
5787 return plt_sym_val;
5788 }
5789
5790 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5791 support. */
5792
5793 static long
5794 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5795 long symcount,
5796 asymbol **syms,
5797 long dynsymcount,
5798 asymbol **dynsyms,
5799 asymbol **ret)
5800 {
5801 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5802 as PLT if it exists. */
5803 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5804 if (plt == NULL)
5805 plt = bfd_get_section_by_name (abfd, ".plt");
5806 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5807 dynsymcount, dynsyms, ret,
5808 plt,
5809 elf_x86_64_get_plt_sym_val);
5810 }
5811
5812 /* Handle an x86-64 specific section when reading an object file. This
5813 is called when elfcode.h finds a section with an unknown type. */
5814
5815 static bfd_boolean
5816 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5817 const char *name, int shindex)
5818 {
5819 if (hdr->sh_type != SHT_X86_64_UNWIND)
5820 return FALSE;
5821
5822 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5823 return FALSE;
5824
5825 return TRUE;
5826 }
5827
5828 /* Hook called by the linker routine which adds symbols from an object
5829 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5830 of .bss. */
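/* Symbols placed in SHN_X86_64_LCOMMON carry SHF_X86_64_LARGE, so
   that under the medium and large code models they may end up outside
   the low 2 GiB of the address space, alongside .ldata and .lbss.  */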
5831
5832 static bfd_boolean
5833 elf_x86_64_add_symbol_hook (bfd *abfd,
5834 struct bfd_link_info *info,
5835 Elf_Internal_Sym *sym,
5836 const char **namep ATTRIBUTE_UNUSED,
5837 flagword *flagsp ATTRIBUTE_UNUSED,
5838 asection **secp,
5839 bfd_vma *valp)
5840 {
5841 asection *lcomm;
5842
5843 switch (sym->st_shndx)
5844 {
5845 case SHN_X86_64_LCOMMON:
5846 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5847 if (lcomm == NULL)
5848 {
5849 lcomm = bfd_make_section_with_flags (abfd,
5850 "LARGE_COMMON",
5851 (SEC_ALLOC
5852 | SEC_IS_COMMON
5853 | SEC_LINKER_CREATED));
5854 if (lcomm == NULL)
5855 return FALSE;
5856 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5857 }
5858 *secp = lcomm;
5859 *valp = sym->st_size;
5860 return TRUE;
5861 }
5862
5863 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5864 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5865 && (abfd->flags & DYNAMIC) == 0
5866 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5867 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5868
5869 return TRUE;
5870 }
5871
5872
5873 /* Given a BFD section, try to locate the corresponding ELF section
5874 index. */
5875
5876 static bfd_boolean
5877 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5878 asection *sec, int *index_return)
5879 {
5880 if (sec == &_bfd_elf_large_com_section)
5881 {
5882 *index_return = SHN_X86_64_LCOMMON;
5883 return TRUE;
5884 }
5885 return FALSE;
5886 }
5887
5888 /* Process a symbol. */
5889
5890 static void
5891 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5892 asymbol *asym)
5893 {
5894 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5895
5896 switch (elfsym->internal_elf_sym.st_shndx)
5897 {
5898 case SHN_X86_64_LCOMMON:
5899 asym->section = &_bfd_elf_large_com_section;
5900 asym->value = elfsym->internal_elf_sym.st_size;
5901 /* Common symbols do not set BSF_GLOBAL. */
5902 asym->flags &= ~BSF_GLOBAL;
5903 break;
5904 }
5905 }
5906
5907 static bfd_boolean
5908 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5909 {
5910 return (sym->st_shndx == SHN_COMMON
5911 || sym->st_shndx == SHN_X86_64_LCOMMON);
5912 }
5913
5914 static unsigned int
5915 elf_x86_64_common_section_index (asection *sec)
5916 {
5917 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5918 return SHN_COMMON;
5919 else
5920 return SHN_X86_64_LCOMMON;
5921 }
5922
5923 static asection *
5924 elf_x86_64_common_section (asection *sec)
5925 {
5926 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5927 return bfd_com_section_ptr;
5928 else
5929 return &_bfd_elf_large_com_section;
5930 }
5931
5932 static bfd_boolean
5933 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5934 const Elf_Internal_Sym *sym,
5935 asection **psec,
5936 bfd_boolean newdef,
5937 bfd_boolean olddef,
5938 bfd *oldbfd,
5939 const asection *oldsec)
5940 {
5941 /* A normal common symbol and a large common symbol result in a
5942 normal common symbol. We turn the large common symbol into a
5943 normal one. */
5944 if (!olddef
5945 && h->root.type == bfd_link_hash_common
5946 && !newdef
5947 && bfd_is_com_section (*psec)
5948 && oldsec != *psec)
5949 {
5950 if (sym->st_shndx == SHN_COMMON
5951 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5952 {
5953 h->root.u.c.p->section
5954 = bfd_make_section_old_way (oldbfd, "COMMON");
5955 h->root.u.c.p->section->flags = SEC_ALLOC;
5956 }
5957 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5958 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5959 *psec = bfd_com_section_ptr;
5960 }
5961
5962 return TRUE;
5963 }
5964
5965 static int
5966 elf_x86_64_additional_program_headers (bfd *abfd,
5967 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5968 {
5969 asection *s;
5970 int count = 0;
5971
5972 /* Check to see if we need a large readonly segment. */
5973 s = bfd_get_section_by_name (abfd, ".lrodata");
5974 if (s && (s->flags & SEC_LOAD))
5975 count++;
5976
5977 /* Check to see if we need a large data segment. Since the .lbss
5978 section is placed right after the .bss section, there should be no
5979 need for a large data segment just because of .lbss. */
5980 s = bfd_get_section_by_name (abfd, ".ldata");
5981 if (s && (s->flags & SEC_LOAD))
5982 count++;
5983
5984 return count;
5985 }
5986
5987 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5988
5989 static bfd_boolean
5990 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5991 {
5992 if (h->plt.offset != (bfd_vma) -1
5993 && !h->def_regular
5994 && !h->pointer_equality_needed)
5995 return FALSE;
5996
5997 return _bfd_elf_hash_symbol (h);
5998 }
5999
6000 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
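/* In practice this keeps ELFCLASS32 (x32) and ELFCLASS64 objects from
   being treated as relocation-compatible with each other, on top of
   the generic check.  */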
6001
6002 static bfd_boolean
6003 elf_x86_64_relocs_compatible (const bfd_target *input,
6004 const bfd_target *output)
6005 {
6006 return ((xvec_get_elf_backend_data (input)->s->elfclass
6007 == xvec_get_elf_backend_data (output)->s->elfclass)
6008 && _bfd_elf_relocs_compatible (input, output));
6009 }
6010
6011 static const struct bfd_elf_special_section
6012 elf_x86_64_special_sections[]=
6013 {
6014 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6015 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6016 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6017 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6018 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6019 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6020 { NULL, 0, 0, 0, 0 }
6021 };
6022
6023 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6024 #define TARGET_LITTLE_NAME "elf64-x86-64"
6025 #define ELF_ARCH bfd_arch_i386
6026 #define ELF_TARGET_ID X86_64_ELF_DATA
6027 #define ELF_MACHINE_CODE EM_X86_64
6028 #define ELF_MAXPAGESIZE 0x200000
6029 #define ELF_MINPAGESIZE 0x1000
6030 #define ELF_COMMONPAGESIZE 0x1000
6031
6032 #define elf_backend_can_gc_sections 1
6033 #define elf_backend_can_refcount 1
6034 #define elf_backend_want_got_plt 1
6035 #define elf_backend_plt_readonly 1
6036 #define elf_backend_want_plt_sym 0
6037 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6038 #define elf_backend_rela_normal 1
6039 #define elf_backend_plt_alignment 4
6040 #define elf_backend_extern_protected_data 1
6041
6042 #define elf_info_to_howto elf_x86_64_info_to_howto
6043
6044 #define bfd_elf64_bfd_link_hash_table_create \
6045 elf_x86_64_link_hash_table_create
6046 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6047 #define bfd_elf64_bfd_reloc_name_lookup \
6048 elf_x86_64_reloc_name_lookup
6049
6050 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6051 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6052 #define elf_backend_check_relocs elf_x86_64_check_relocs
6053 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6054 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6055 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6056 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6057 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6058 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
6059 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6060 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6061 #ifdef CORE_HEADER
6062 #define elf_backend_write_core_note elf_x86_64_write_core_note
6063 #endif
6064 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6065 #define elf_backend_relocate_section elf_x86_64_relocate_section
6066 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6067 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6068 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6069 #define elf_backend_object_p elf64_x86_64_elf_object_p
6070 #define bfd_elf64_mkobject elf_x86_64_mkobject
6071 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6072
6073 #define elf_backend_section_from_shdr \
6074 elf_x86_64_section_from_shdr
6075
6076 #define elf_backend_section_from_bfd_section \
6077 elf_x86_64_elf_section_from_bfd_section
6078 #define elf_backend_add_symbol_hook \
6079 elf_x86_64_add_symbol_hook
6080 #define elf_backend_symbol_processing \
6081 elf_x86_64_symbol_processing
6082 #define elf_backend_common_section_index \
6083 elf_x86_64_common_section_index
6084 #define elf_backend_common_section \
6085 elf_x86_64_common_section
6086 #define elf_backend_common_definition \
6087 elf_x86_64_common_definition
6088 #define elf_backend_merge_symbol \
6089 elf_x86_64_merge_symbol
6090 #define elf_backend_special_sections \
6091 elf_x86_64_special_sections
6092 #define elf_backend_additional_program_headers \
6093 elf_x86_64_additional_program_headers
6094 #define elf_backend_hash_symbol \
6095 elf_x86_64_hash_symbol
6096
6097 #include "elf64-target.h"
6098
6099 /* CloudABI support. */
6100
6101 #undef TARGET_LITTLE_SYM
6102 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6103 #undef TARGET_LITTLE_NAME
6104 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6105
6106 #undef ELF_OSABI
6107 #define ELF_OSABI ELFOSABI_CLOUDABI
6108
6109 #undef elf64_bed
6110 #define elf64_bed elf64_x86_64_cloudabi_bed
6111
6112 #include "elf64-target.h"
6113
6114 /* FreeBSD support. */
6115
6116 #undef TARGET_LITTLE_SYM
6117 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6118 #undef TARGET_LITTLE_NAME
6119 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6120
6121 #undef ELF_OSABI
6122 #define ELF_OSABI ELFOSABI_FREEBSD
6123
6124 #undef elf64_bed
6125 #define elf64_bed elf64_x86_64_fbsd_bed
6126
6127 #include "elf64-target.h"
6128
6129 /* Solaris 2 support. */
6130
6131 #undef TARGET_LITTLE_SYM
6132 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6133 #undef TARGET_LITTLE_NAME
6134 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6135
6136 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6137 objects won't be recognized. */
6138 #undef ELF_OSABI
6139
6140 #undef elf64_bed
6141 #define elf64_bed elf64_x86_64_sol2_bed
6142
6143 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6144 boundary. */
6145 #undef elf_backend_static_tls_alignment
6146 #define elf_backend_static_tls_alignment 16
6147
6148 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6149
6150 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6151 File, p.63. */
6152 #undef elf_backend_want_plt_sym
6153 #define elf_backend_want_plt_sym 1
6154
6155 #include "elf64-target.h"
6156
6157 /* Native Client support. */
6158
6159 static bfd_boolean
6160 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6161 {
6162 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6163 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6164 return TRUE;
6165 }
6166
6167 #undef TARGET_LITTLE_SYM
6168 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6169 #undef TARGET_LITTLE_NAME
6170 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6171 #undef elf64_bed
6172 #define elf64_bed elf64_x86_64_nacl_bed
6173
6174 #undef ELF_MAXPAGESIZE
6175 #undef ELF_MINPAGESIZE
6176 #undef ELF_COMMONPAGESIZE
6177 #define ELF_MAXPAGESIZE 0x10000
6178 #define ELF_MINPAGESIZE 0x10000
6179 #define ELF_COMMONPAGESIZE 0x10000
6180
6181 /* Restore defaults. */
6182 #undef ELF_OSABI
6183 #undef elf_backend_static_tls_alignment
6184 #undef elf_backend_want_plt_sym
6185 #define elf_backend_want_plt_sym 0
6186
6187 /* NaCl uses substantially different PLT entries for the same effects. */
6188
6189 #undef elf_backend_plt_alignment
6190 #define elf_backend_plt_alignment 5
6191 #define NACL_PLT_ENTRY_SIZE 64
6192 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
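/* 0xe0 is the sign-extended imm8 encoding of -32 used by the
   "and $-32, %r11d" below; clearing the low five bits forces indirect
   branch targets onto the 32-byte bundle boundaries NaCl requires.  */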
6193
6194 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6195 {
6196 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6197 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6198 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6199 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6200 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6201
6202 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6203 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6204
6205 /* 32 bytes of nop to pad out to the standard size. */
6206 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6207 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6208 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6209 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6210 0x66, /* excess data32 prefix */
6211 0x90 /* nop */
6212 };
6213
6214 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6215 {
6216 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6217 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6218 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6219 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6220
6221 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6222 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6223 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6224
6225 /* Lazy GOT entries point here (32-byte aligned). */
6226 0x68, /* pushq immediate */
6227 0, 0, 0, 0, /* replaced with index into relocation table. */
6228 0xe9, /* jmp relative */
6229 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6230
6231 /* 22 bytes of nop to pad out to the standard size. */
6232 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6233 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6234 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6235 };
6236
6237 /* .eh_frame covering the .plt section. */
6238
6239 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6240 {
6241 #if (PLT_CIE_LENGTH != 20 \
6242 || PLT_FDE_LENGTH != 36 \
6243 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6244 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6245 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6246 #endif
6247 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6248 0, 0, 0, 0, /* CIE ID */
6249 1, /* CIE version */
6250 'z', 'R', 0, /* Augmentation string */
6251 1, /* Code alignment factor */
6252 0x78, /* Data alignment factor */
6253 16, /* Return address column */
6254 1, /* Augmentation size */
6255 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6256 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6257 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6258 DW_CFA_nop, DW_CFA_nop,
6259
6260 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6261 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6262 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6263 0, 0, 0, 0, /* .plt size goes here */
6264 0, /* Augmentation size */
6265 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6266 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6267 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6268 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6269 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6270 13, /* Block length */
6271 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6272 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6273 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6274 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
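/* The expression above evaluates to
   CFA = %rsp + 8 + (((%rip & 63) >= 37) << 3),
   i.e. once execution has passed the pushq of the relocation index
   (which ends at offset 37 within the 64-byte entry), the CFA is a
   further 8 bytes from %rsp because the push lowered the stack
   pointer.  */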
6275 DW_CFA_nop, DW_CFA_nop
6276 };
6277
6278 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6279 {
6280 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6281 elf_x86_64_nacl_plt_entry, /* plt_entry */
6282 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6283 2, /* plt0_got1_offset */
6284 9, /* plt0_got2_offset */
6285 13, /* plt0_got2_insn_end */
6286 3, /* plt_got_offset */
6287 33, /* plt_reloc_offset */
6288 38, /* plt_plt_offset */
6289 7, /* plt_got_insn_size */
6290 42, /* plt_plt_insn_end */
6291 32, /* plt_lazy_offset */
6292 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6293 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6294 };
6295
6296 #undef elf_backend_arch_data
6297 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6298
6299 #undef elf_backend_object_p
6300 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6301 #undef elf_backend_modify_segment_map
6302 #define elf_backend_modify_segment_map nacl_modify_segment_map
6303 #undef elf_backend_modify_program_headers
6304 #define elf_backend_modify_program_headers nacl_modify_program_headers
6305 #undef elf_backend_final_write_processing
6306 #define elf_backend_final_write_processing nacl_final_write_processing
6307
6308 #include "elf64-target.h"
6309
6310 /* Native Client x32 support. */
6311
6312 static bfd_boolean
6313 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6314 {
6315 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6316 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6317 return TRUE;
6318 }
6319
6320 #undef TARGET_LITTLE_SYM
6321 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6322 #undef TARGET_LITTLE_NAME
6323 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6324 #undef elf32_bed
6325 #define elf32_bed elf32_x86_64_nacl_bed
6326
6327 #define bfd_elf32_bfd_link_hash_table_create \
6328 elf_x86_64_link_hash_table_create
6329 #define bfd_elf32_bfd_reloc_type_lookup \
6330 elf_x86_64_reloc_type_lookup
6331 #define bfd_elf32_bfd_reloc_name_lookup \
6332 elf_x86_64_reloc_name_lookup
6333 #define bfd_elf32_mkobject \
6334 elf_x86_64_mkobject
6335 #define bfd_elf32_get_synthetic_symtab \
6336 elf_x86_64_get_synthetic_symtab
6337
6338 #undef elf_backend_object_p
6339 #define elf_backend_object_p \
6340 elf32_x86_64_nacl_elf_object_p
6341
6342 #undef elf_backend_bfd_from_remote_memory
6343 #define elf_backend_bfd_from_remote_memory \
6344 _bfd_elf32_bfd_from_remote_memory
6345
6346 #undef elf_backend_size_info
6347 #define elf_backend_size_info \
6348 _bfd_elf32_size_info
6349
6350 #include "elf32-target.h"
6351
6352 /* Restore defaults. */
6353 #undef elf_backend_object_p
6354 #define elf_backend_object_p elf64_x86_64_elf_object_p
6355 #undef elf_backend_bfd_from_remote_memory
6356 #undef elf_backend_size_info
6357 #undef elf_backend_modify_segment_map
6358 #undef elf_backend_modify_program_headers
6359 #undef elf_backend_final_write_processing
6360
6361 /* Intel L1OM support. */
6362
6363 static bfd_boolean
6364 elf64_l1om_elf_object_p (bfd *abfd)
6365 {
6366 /* Set the right machine number for an L1OM elf64 file. */
6367 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6368 return TRUE;
6369 }
6370
6371 #undef TARGET_LITTLE_SYM
6372 #define TARGET_LITTLE_SYM l1om_elf64_vec
6373 #undef TARGET_LITTLE_NAME
6374 #define TARGET_LITTLE_NAME "elf64-l1om"
6375 #undef ELF_ARCH
6376 #define ELF_ARCH bfd_arch_l1om
6377
6378 #undef ELF_MACHINE_CODE
6379 #define ELF_MACHINE_CODE EM_L1OM
6380
6381 #undef ELF_OSABI
6382
6383 #undef elf64_bed
6384 #define elf64_bed elf64_l1om_bed
6385
6386 #undef elf_backend_object_p
6387 #define elf_backend_object_p elf64_l1om_elf_object_p
6388
6389 /* Restore defaults. */
6390 #undef ELF_MAXPAGESIZE
6391 #undef ELF_MINPAGESIZE
6392 #undef ELF_COMMONPAGESIZE
6393 #define ELF_MAXPAGESIZE 0x200000
6394 #define ELF_MINPAGESIZE 0x1000
6395 #define ELF_COMMONPAGESIZE 0x1000
6396 #undef elf_backend_plt_alignment
6397 #define elf_backend_plt_alignment 4
6398 #undef elf_backend_arch_data
6399 #define elf_backend_arch_data &elf_x86_64_arch_bed
6400
6401 #include "elf64-target.h"
6402
6403 /* FreeBSD L1OM support. */
6404
6405 #undef TARGET_LITTLE_SYM
6406 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6407 #undef TARGET_LITTLE_NAME
6408 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6409
6410 #undef ELF_OSABI
6411 #define ELF_OSABI ELFOSABI_FREEBSD
6412
6413 #undef elf64_bed
6414 #define elf64_bed elf64_l1om_fbsd_bed
6415
6416 #include "elf64-target.h"
6417
6418 /* Intel K1OM support. */
6419
6420 static bfd_boolean
6421 elf64_k1om_elf_object_p (bfd *abfd)
6422 {
6423 /* Set the right machine number for a K1OM elf64 file. */
6424 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6425 return TRUE;
6426 }
6427
6428 #undef TARGET_LITTLE_SYM
6429 #define TARGET_LITTLE_SYM k1om_elf64_vec
6430 #undef TARGET_LITTLE_NAME
6431 #define TARGET_LITTLE_NAME "elf64-k1om"
6432 #undef ELF_ARCH
6433 #define ELF_ARCH bfd_arch_k1om
6434
6435 #undef ELF_MACHINE_CODE
6436 #define ELF_MACHINE_CODE EM_K1OM
6437
6438 #undef ELF_OSABI
6439
6440 #undef elf64_bed
6441 #define elf64_bed elf64_k1om_bed
6442
6443 #undef elf_backend_object_p
6444 #define elf_backend_object_p elf64_k1om_elf_object_p
6445
6446 #undef elf_backend_static_tls_alignment
6447
6448 #undef elf_backend_want_plt_sym
6449 #define elf_backend_want_plt_sym 0
6450
6451 #include "elf64-target.h"
6452
6453 /* FreeBSD K1OM support. */
6454
6455 #undef TARGET_LITTLE_SYM
6456 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6457 #undef TARGET_LITTLE_NAME
6458 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6459
6460 #undef ELF_OSABI
6461 #define ELF_OSABI ELFOSABI_FREEBSD
6462
6463 #undef elf64_bed
6464 #define elf64_bed elf64_k1om_fbsd_bed
6465
6466 #include "elf64-target.h"
6467
6468 /* 32-bit x86-64 support. */
6469
6470 #undef TARGET_LITTLE_SYM
6471 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6472 #undef TARGET_LITTLE_NAME
6473 #define TARGET_LITTLE_NAME "elf32-x86-64"
6474 #undef elf32_bed
6475
6476 #undef ELF_ARCH
6477 #define ELF_ARCH bfd_arch_i386
6478
6479 #undef ELF_MACHINE_CODE
6480 #define ELF_MACHINE_CODE EM_X86_64
6481
6482 #undef ELF_OSABI
6483
6484 #undef elf_backend_object_p
6485 #define elf_backend_object_p \
6486 elf32_x86_64_elf_object_p
6487
6488 #undef elf_backend_bfd_from_remote_memory
6489 #define elf_backend_bfd_from_remote_memory \
6490 _bfd_elf32_bfd_from_remote_memory
6491
6492 #undef elf_backend_size_info
6493 #define elf_backend_size_info \
6494 _bfd_elf32_size_info
6495
6496 #include "elf32-target.h"