Reformat elf_x86_64_create_dynamic_sections
binutils-gdb.git / bfd / elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since 32-bit and 64-bit x86-64 encode the relocation type identically,
45 we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the relocation
46 type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE since they
47 are the same. */
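/* (For reference: ELF64_R_INFO keeps the relocation type in the low 32 bits
   of r_info and ELF32_R_INFO in the low 8 bits; every R_X86_64_* value fits
   in 8 bits, so ELF32_R_TYPE extracts the type correctly from either
   encoding.)  */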
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
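/* Return the howto entry for relocation type R_TYPE, mapping the GNU
   vtable relocs across the gap in the reloc numbers and using the
   x32-specific R_X86_64_32 entry (the last one in the table) when ABFD
   is not 64-bit.  */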
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return 0;
306 }
307
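/* Given a relocation name, return a HOWTO structure.  */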
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
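/* Accessors for the x86-64 backend data hanging off the generic ELF
   backend data.  */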
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if symbol has at least one BND relocation. */
761 bfd_boolean has_bnd_reloc;
762
763 /* Information about the GOT PLT entry. Filled when there are both
764 GOT and PLT relocations against the same function. */
765 union gotplt_union plt_got;
766
767 /* Information about the second PLT entry. Filled when has_bnd_reloc is
768 set. */
769 union gotplt_union plt_bnd;
770
771 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
772 starting at the end of the jump table. */
773 bfd_vma tlsdesc_got;
774 };
775
776 #define elf_x86_64_hash_entry(ent) \
777 ((struct elf_x86_64_link_hash_entry *)(ent))
778
779 struct elf_x86_64_obj_tdata
780 {
781 struct elf_obj_tdata root;
782
783 /* tls_type for each local got entry. */
784 char *local_got_tls_type;
785
786 /* GOTPLT entries for TLS descriptors. */
787 bfd_vma *local_tlsdesc_gotent;
788 };
789
790 #define elf_x86_64_tdata(abfd) \
791 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
792
793 #define elf_x86_64_local_got_tls_type(abfd) \
794 (elf_x86_64_tdata (abfd)->local_got_tls_type)
795
796 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
797 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
798
799 #define is_x86_64_elf(bfd) \
800 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
801 && elf_tdata (bfd) != NULL \
802 && elf_object_id (bfd) == X86_64_ELF_DATA)
803
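/* Allocate x86-64 ELF private object data.  */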
804 static bfd_boolean
805 elf_x86_64_mkobject (bfd *abfd)
806 {
807 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
808 X86_64_ELF_DATA);
809 }
810
811 /* x86-64 ELF linker hash table. */
812
813 struct elf_x86_64_link_hash_table
814 {
815 struct elf_link_hash_table elf;
816
817 /* Short-cuts to get to dynamic linker sections. */
818 asection *sdynbss;
819 asection *srelbss;
820 asection *plt_eh_frame;
821 asection *plt_bnd;
822 asection *plt_got;
823
824 union
825 {
826 bfd_signed_vma refcount;
827 bfd_vma offset;
828 } tls_ld_got;
829
830 /* The amount of space used by the jump slots in the GOT. */
831 bfd_vma sgotplt_jump_table_size;
832
833 /* Small local sym cache. */
834 struct sym_cache sym_cache;
835
836 bfd_vma (*r_info) (bfd_vma, bfd_vma);
837 bfd_vma (*r_sym) (bfd_vma);
838 unsigned int pointer_r_type;
839 const char *dynamic_interpreter;
840 int dynamic_interpreter_size;
841
842 /* _TLS_MODULE_BASE_ symbol. */
843 struct bfd_link_hash_entry *tls_module_base;
844
845 /* Used by local STT_GNU_IFUNC symbols. */
846 htab_t loc_hash_table;
847 void * loc_hash_memory;
848
849 /* The offset into splt of the PLT entry for the TLS descriptor
850 resolver. Special values are 0, if not necessary (or not found
851 to be necessary yet), and -1 if needed but not determined
852 yet. */
853 bfd_vma tlsdesc_plt;
854 /* The offset into sgot of the GOT entry used by the PLT entry
855 above. */
856 bfd_vma tlsdesc_got;
857
858 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
859 bfd_vma next_jump_slot_index;
860 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
861 bfd_vma next_irelative_index;
862 };
863
864 /* Get the x86-64 ELF linker hash table from a link_info structure. */
865
866 #define elf_x86_64_hash_table(p) \
867 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
868 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
869
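/* Size in bytes of the jump-slot area of .got.plt: one GOT_ENTRY_SIZE slot
   per relocation in .rela.plt.  */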
870 #define elf_x86_64_compute_jump_table_size(htab) \
871 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
872
873 /* Create an entry in an x86-64 ELF linker hash table. */
874
875 static struct bfd_hash_entry *
876 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
877 struct bfd_hash_table *table,
878 const char *string)
879 {
880 /* Allocate the structure if it has not already been allocated by a
881 subclass. */
882 if (entry == NULL)
883 {
884 entry = (struct bfd_hash_entry *)
885 bfd_hash_allocate (table,
886 sizeof (struct elf_x86_64_link_hash_entry));
887 if (entry == NULL)
888 return entry;
889 }
890
891 /* Call the allocation method of the superclass. */
892 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
893 if (entry != NULL)
894 {
895 struct elf_x86_64_link_hash_entry *eh;
896
897 eh = (struct elf_x86_64_link_hash_entry *) entry;
898 eh->dyn_relocs = NULL;
899 eh->tls_type = GOT_UNKNOWN;
900 eh->has_bnd_reloc = FALSE;
901 eh->plt_bnd.offset = (bfd_vma) -1;
902 eh->plt_got.offset = (bfd_vma) -1;
903 eh->tlsdesc_got = (bfd_vma) -1;
904 }
905
906 return entry;
907 }
908
909 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
910 for local symbols so that we can handle local STT_GNU_IFUNC symbols
911 as global symbols. We reuse indx and dynstr_index for the local
912 symbol hash since they aren't used by global symbols in this backend. */
913
914 static hashval_t
915 elf_x86_64_local_htab_hash (const void *ptr)
916 {
917 struct elf_link_hash_entry *h
918 = (struct elf_link_hash_entry *) ptr;
919 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
920 }
921
922 /* Compare local hash entries. */
923
924 static int
925 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
926 {
927 struct elf_link_hash_entry *h1
928 = (struct elf_link_hash_entry *) ptr1;
929 struct elf_link_hash_entry *h2
930 = (struct elf_link_hash_entry *) ptr2;
931
932 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
933 }
934
935 /* Find and/or create a hash entry for a local symbol. */
936
937 static struct elf_link_hash_entry *
938 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
939 bfd *abfd, const Elf_Internal_Rela *rel,
940 bfd_boolean create)
941 {
942 struct elf_x86_64_link_hash_entry e, *ret;
943 asection *sec = abfd->sections;
944 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
945 htab->r_sym (rel->r_info));
946 void **slot;
947
948 e.elf.indx = sec->id;
949 e.elf.dynstr_index = htab->r_sym (rel->r_info);
950 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
951 create ? INSERT : NO_INSERT);
952
953 if (!slot)
954 return NULL;
955
956 if (*slot)
957 {
958 ret = (struct elf_x86_64_link_hash_entry *) *slot;
959 return &ret->elf;
960 }
961
962 ret = (struct elf_x86_64_link_hash_entry *)
963 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
964 sizeof (struct elf_x86_64_link_hash_entry));
965 if (ret)
966 {
967 memset (ret, 0, sizeof (*ret));
968 ret->elf.indx = sec->id;
969 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
970 ret->elf.dynindx = -1;
971 ret->plt_got.offset = (bfd_vma) -1;
972 *slot = ret;
973 }
974 return &ret->elf;
975 }
976
977 /* Destroy an X86-64 ELF linker hash table. */
978
979 static void
980 elf_x86_64_link_hash_table_free (bfd *obfd)
981 {
982 struct elf_x86_64_link_hash_table *htab
983 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
984
985 if (htab->loc_hash_table)
986 htab_delete (htab->loc_hash_table);
987 if (htab->loc_hash_memory)
988 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
989 _bfd_elf_link_hash_table_free (obfd);
990 }
991
992 /* Create an X86-64 ELF linker hash table. */
993
994 static struct bfd_link_hash_table *
995 elf_x86_64_link_hash_table_create (bfd *abfd)
996 {
997 struct elf_x86_64_link_hash_table *ret;
998 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
999
1000 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1001 if (ret == NULL)
1002 return NULL;
1003
1004 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1005 elf_x86_64_link_hash_newfunc,
1006 sizeof (struct elf_x86_64_link_hash_entry),
1007 X86_64_ELF_DATA))
1008 {
1009 free (ret);
1010 return NULL;
1011 }
1012
1013 if (ABI_64_P (abfd))
1014 {
1015 ret->r_info = elf64_r_info;
1016 ret->r_sym = elf64_r_sym;
1017 ret->pointer_r_type = R_X86_64_64;
1018 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1019 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1020 }
1021 else
1022 {
1023 ret->r_info = elf32_r_info;
1024 ret->r_sym = elf32_r_sym;
1025 ret->pointer_r_type = R_X86_64_32;
1026 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1028 }
1029
1030 ret->loc_hash_table = htab_try_create (1024,
1031 elf_x86_64_local_htab_hash,
1032 elf_x86_64_local_htab_eq,
1033 NULL);
1034 ret->loc_hash_memory = objalloc_create ();
1035 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1036 {
1037 elf_x86_64_link_hash_table_free (abfd);
1038 return NULL;
1039 }
1040 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1041
1042 return &ret->elf.root;
1043 }
1044
1045 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1046 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1047 hash table. */
1048
1049 static bfd_boolean
1050 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1051 struct bfd_link_info *info)
1052 {
1053 struct elf_x86_64_link_hash_table *htab;
1054
1055 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1056 return FALSE;
1057
1058 htab = elf_x86_64_hash_table (info);
1059 if (htab == NULL)
1060 return FALSE;
1061
1062 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1063 if (!htab->sdynbss)
1064 abort ();
1065
1066 if (info->executable)
1067 {
1068 /* Always allow copy relocs for building executables. */
1069 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1070 if (s == NULL)
1071 {
1072 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1073 s = bfd_make_section_anyway_with_flags (dynobj,
1074 ".rela.bss",
1075 (bed->dynamic_sec_flags
1076 | SEC_READONLY));
1077 if (s == NULL
1078 || ! bfd_set_section_alignment (dynobj, s,
1079 bed->s->log_file_align))
1080 return FALSE;
1081 }
1082 htab->srelbss = s;
1083 }
1084
1085 if (!info->no_ld_generated_unwind_info
1086 && htab->plt_eh_frame == NULL
1087 && htab->elf.splt != NULL)
1088 {
1089 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1090 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1091 | SEC_LINKER_CREATED);
1092 htab->plt_eh_frame
1093 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1094 if (htab->plt_eh_frame == NULL
1095 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1096 return FALSE;
1097 }
1098 return TRUE;
1099 }
1100
1101 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1102
1103 static void
1104 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1105 struct elf_link_hash_entry *dir,
1106 struct elf_link_hash_entry *ind)
1107 {
1108 struct elf_x86_64_link_hash_entry *edir, *eind;
1109
1110 edir = (struct elf_x86_64_link_hash_entry *) dir;
1111 eind = (struct elf_x86_64_link_hash_entry *) ind;
1112
1113 if (!edir->has_bnd_reloc)
1114 edir->has_bnd_reloc = eind->has_bnd_reloc;
1115
1116 if (eind->dyn_relocs != NULL)
1117 {
1118 if (edir->dyn_relocs != NULL)
1119 {
1120 struct elf_dyn_relocs **pp;
1121 struct elf_dyn_relocs *p;
1122
1123 /* Add reloc counts against the indirect sym to the direct sym
1124 list. Merge any entries against the same section. */
1125 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1126 {
1127 struct elf_dyn_relocs *q;
1128
1129 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1130 if (q->sec == p->sec)
1131 {
1132 q->pc_count += p->pc_count;
1133 q->count += p->count;
1134 *pp = p->next;
1135 break;
1136 }
1137 if (q == NULL)
1138 pp = &p->next;
1139 }
1140 *pp = edir->dyn_relocs;
1141 }
1142
1143 edir->dyn_relocs = eind->dyn_relocs;
1144 eind->dyn_relocs = NULL;
1145 }
1146
1147 if (ind->root.type == bfd_link_hash_indirect
1148 && dir->got.refcount <= 0)
1149 {
1150 edir->tls_type = eind->tls_type;
1151 eind->tls_type = GOT_UNKNOWN;
1152 }
1153
1154 if (ELIMINATE_COPY_RELOCS
1155 && ind->root.type != bfd_link_hash_indirect
1156 && dir->dynamic_adjusted)
1157 {
1158 /* If called to transfer flags for a weakdef during processing
1159 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1160 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1161 dir->ref_dynamic |= ind->ref_dynamic;
1162 dir->ref_regular |= ind->ref_regular;
1163 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1164 dir->needs_plt |= ind->needs_plt;
1165 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1166 }
1167 else
1168 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1169 }
1170
1171 static bfd_boolean
1172 elf64_x86_64_elf_object_p (bfd *abfd)
1173 {
1174 /* Set the right machine number for an x86-64 elf64 file. */
1175 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1176 return TRUE;
1177 }
1178
1179 static bfd_boolean
1180 elf32_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf32 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1184 return TRUE;
1185 }
1186
1187 /* Return TRUE if the TLS access code sequence supports transition
1188 from R_TYPE. */
1189
1190 static bfd_boolean
1191 elf_x86_64_check_tls_transition (bfd *abfd,
1192 struct bfd_link_info *info,
1193 asection *sec,
1194 bfd_byte *contents,
1195 Elf_Internal_Shdr *symtab_hdr,
1196 struct elf_link_hash_entry **sym_hashes,
1197 unsigned int r_type,
1198 const Elf_Internal_Rela *rel,
1199 const Elf_Internal_Rela *relend)
1200 {
1201 unsigned int val;
1202 unsigned long r_symndx;
1203 bfd_boolean largepic = FALSE;
1204 struct elf_link_hash_entry *h;
1205 bfd_vma offset;
1206 struct elf_x86_64_link_hash_table *htab;
1207
1208 /* Get the section contents. */
1209 if (contents == NULL)
1210 {
1211 if (elf_section_data (sec)->this_hdr.contents != NULL)
1212 contents = elf_section_data (sec)->this_hdr.contents;
1213 else
1214 {
1215 /* FIXME: How to better handle error condition? */
1216 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1217 return FALSE;
1218
1219 /* Cache the section contents for elf_link_input_bfd. */
1220 elf_section_data (sec)->this_hdr.contents = contents;
1221 }
1222 }
1223
1224 htab = elf_x86_64_hash_table (info);
1225 offset = rel->r_offset;
1226 switch (r_type)
1227 {
1228 case R_X86_64_TLSGD:
1229 case R_X86_64_TLSLD:
1230 if ((rel + 1) >= relend)
1231 return FALSE;
1232
1233 if (r_type == R_X86_64_TLSGD)
1234 {
1235 /* Check transition from GD access model. For 64bit, only
1236 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1237 .word 0x6666; rex64; call __tls_get_addr
1238 can transit to different access model. For 32bit, only
1239 leaq foo@tlsgd(%rip), %rdi
1240 .word 0x6666; rex64; call __tls_get_addr
1241 can transit to different access model. For largepic
1242 we also support:
1243 leaq foo@tlsgd(%rip), %rdi
1244 movabsq $__tls_get_addr@pltoff, %rax
1245 addq %rbx, %rax
1246 call *%rax. */
1247
1248 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1249 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1250
1251 if ((offset + 12) > sec->size)
1252 return FALSE;
1253
1254 if (memcmp (contents + offset + 4, call, 4) != 0)
1255 {
1256 if (!ABI_64_P (abfd)
1257 || (offset + 19) > sec->size
1258 || offset < 3
1259 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1260 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1261 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1262 != 0)
1263 return FALSE;
1264 largepic = TRUE;
1265 }
1266 else if (ABI_64_P (abfd))
1267 {
1268 if (offset < 4
1269 || memcmp (contents + offset - 4, leaq, 4) != 0)
1270 return FALSE;
1271 }
1272 else
1273 {
1274 if (offset < 3
1275 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1276 return FALSE;
1277 }
1278 }
1279 else
1280 {
1281 /* Check transition from LD access model. Only
1282 leaq foo@tlsld(%rip), %rdi;
1283 call __tls_get_addr
1284 can transit to different access model. For largepic
1285 we also support:
1286 leaq foo@tlsld(%rip), %rdi
1287 movabsq $__tls_get_addr@pltoff, %rax
1288 addq %rbx, %rax
1289 call *%rax. */
1290
1291 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1292
1293 if (offset < 3 || (offset + 9) > sec->size)
1294 return FALSE;
1295
1296 if (memcmp (contents + offset - 3, lea, 3) != 0)
1297 return FALSE;
1298
1299 if (0xe8 != *(contents + offset + 4))
1300 {
1301 if (!ABI_64_P (abfd)
1302 || (offset + 19) > sec->size
1303 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1304 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1305 != 0)
1306 return FALSE;
1307 largepic = TRUE;
1308 }
1309 }
1310
1311 r_symndx = htab->r_sym (rel[1].r_info);
1312 if (r_symndx < symtab_hdr->sh_info)
1313 return FALSE;
1314
1315 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1316 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1317 may be versioned. */
1318 return (h != NULL
1319 && h->root.root.string != NULL
1320 && (largepic
1321 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1322 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1323 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1324 && (strncmp (h->root.root.string,
1325 "__tls_get_addr", 14) == 0));
1326
1327 case R_X86_64_GOTTPOFF:
1328 /* Check transition from IE access model:
1329 mov foo@gottpoff(%rip), %reg
1330 add foo@gottpoff(%rip), %reg
1331 */
1332
1333 /* Check REX prefix first. */
1334 if (offset >= 3 && (offset + 4) <= sec->size)
1335 {
1336 val = bfd_get_8 (abfd, contents + offset - 3);
1337 if (val != 0x48 && val != 0x4c)
1338 {
1339 /* X32 may have 0x44 REX prefix or no REX prefix. */
1340 if (ABI_64_P (abfd))
1341 return FALSE;
1342 }
1343 }
1344 else
1345 {
1346 /* X32 may not have any REX prefix. */
1347 if (ABI_64_P (abfd))
1348 return FALSE;
1349 if (offset < 2 || (offset + 3) > sec->size)
1350 return FALSE;
1351 }
1352
1353 val = bfd_get_8 (abfd, contents + offset - 2);
1354 if (val != 0x8b && val != 0x03)
1355 return FALSE;
1356
1357 val = bfd_get_8 (abfd, contents + offset - 1);
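/* ModRM byte: mod == 0 and r/m == 5, i.e. a RIP-relative memory operand
   as required for foo@gottpoff(%rip). */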
1358 return (val & 0xc7) == 5;
1359
1360 case R_X86_64_GOTPC32_TLSDESC:
1361 /* Check transition from GDesc access model:
1362 leaq x@tlsdesc(%rip), %rax
1363
1364 Make sure it's a leaq adding rip to a 32-bit offset
1365 into any register, although it's probably almost always
1366 going to be rax. */
1367
1368 if (offset < 3 || (offset + 4) > sec->size)
1369 return FALSE;
1370
1371 val = bfd_get_8 (abfd, contents + offset - 3);
1372 if ((val & 0xfb) != 0x48)
1373 return FALSE;
1374
1375 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1376 return FALSE;
1377
1378 val = bfd_get_8 (abfd, contents + offset - 1);
1379 return (val & 0xc7) == 0x05;
1380
1381 case R_X86_64_TLSDESC_CALL:
1382 /* Check transition from GDesc access model:
1383 call *x@tlsdesc(%rax)
1384 */
1385 if (offset + 2 <= sec->size)
1386 {
1387 /* Make sure that it's a call *x@tlsdesc(%rax). */
1388 static const unsigned char call[] = { 0xff, 0x10 };
1389 return memcmp (contents + offset, call, 2) == 0;
1390 }
1391
1392 return FALSE;
1393
1394 default:
1395 abort ();
1396 }
1397 }
1398
1399 /* Return TRUE if the TLS access transition is OK or no transition
1400 will be performed. Update R_TYPE if there is a transition. */
1401
1402 static bfd_boolean
1403 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1404 asection *sec, bfd_byte *contents,
1405 Elf_Internal_Shdr *symtab_hdr,
1406 struct elf_link_hash_entry **sym_hashes,
1407 unsigned int *r_type, int tls_type,
1408 const Elf_Internal_Rela *rel,
1409 const Elf_Internal_Rela *relend,
1410 struct elf_link_hash_entry *h,
1411 unsigned long r_symndx)
1412 {
1413 unsigned int from_type = *r_type;
1414 unsigned int to_type = from_type;
1415 bfd_boolean check = TRUE;
1416
1417 /* Skip TLS transition for functions. */
1418 if (h != NULL
1419 && (h->type == STT_FUNC
1420 || h->type == STT_GNU_IFUNC))
1421 return TRUE;
1422
1423 switch (from_type)
1424 {
1425 case R_X86_64_TLSGD:
1426 case R_X86_64_GOTPC32_TLSDESC:
1427 case R_X86_64_TLSDESC_CALL:
1428 case R_X86_64_GOTTPOFF:
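/* When linking an executable, GD, GDesc and IE accesses can be relaxed:
   to the LE model (R_X86_64_TPOFF32) for symbols known to be local,
   otherwise to the IE model (R_X86_64_GOTTPOFF). The relocate pass below
   may further relax IE to LE once a symbol is known not to be dynamic. */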
1429 if (info->executable)
1430 {
1431 if (h == NULL)
1432 to_type = R_X86_64_TPOFF32;
1433 else
1434 to_type = R_X86_64_GOTTPOFF;
1435 }
1436
1437 /* When we are called from elf_x86_64_relocate_section,
1438 CONTENTS isn't NULL and there may be additional transitions
1439 based on TLS_TYPE. */
1440 if (contents != NULL)
1441 {
1442 unsigned int new_to_type = to_type;
1443
1444 if (info->executable
1445 && h != NULL
1446 && h->dynindx == -1
1447 && tls_type == GOT_TLS_IE)
1448 new_to_type = R_X86_64_TPOFF32;
1449
1450 if (to_type == R_X86_64_TLSGD
1451 || to_type == R_X86_64_GOTPC32_TLSDESC
1452 || to_type == R_X86_64_TLSDESC_CALL)
1453 {
1454 if (tls_type == GOT_TLS_IE)
1455 new_to_type = R_X86_64_GOTTPOFF;
1456 }
1457
1458 /* We checked the transition before when we were called from
1459 elf_x86_64_check_relocs. We only want to check the new
1460 transition which hasn't been checked before. */
1461 check = new_to_type != to_type && from_type == to_type;
1462 to_type = new_to_type;
1463 }
1464
1465 break;
1466
1467 case R_X86_64_TLSLD:
1468 if (info->executable)
1469 to_type = R_X86_64_TPOFF32;
1470 break;
1471
1472 default:
1473 return TRUE;
1474 }
1475
1476 /* Return TRUE if there is no transition. */
1477 if (from_type == to_type)
1478 return TRUE;
1479
1480 /* Check if the transition can be performed. */
1481 if (check
1482 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1483 symtab_hdr, sym_hashes,
1484 from_type, rel, relend))
1485 {
1486 reloc_howto_type *from, *to;
1487 const char *name;
1488
1489 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1490 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1491
1492 if (h)
1493 name = h->root.root.string;
1494 else
1495 {
1496 struct elf_x86_64_link_hash_table *htab;
1497
1498 htab = elf_x86_64_hash_table (info);
1499 if (htab == NULL)
1500 name = "*unknown*";
1501 else
1502 {
1503 Elf_Internal_Sym *isym;
1504
1505 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1506 abfd, r_symndx);
1507 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1508 }
1509 }
1510
1511 (*_bfd_error_handler)
1512 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1513 "in section `%A' failed"),
1514 abfd, sec, from->name, to->name, name,
1515 (unsigned long) rel->r_offset);
1516 bfd_set_error (bfd_error_bad_value);
1517 return FALSE;
1518 }
1519
1520 *r_type = to_type;
1521 return TRUE;
1522 }
1523
1524 /* Look through the relocs for a section during the first phase, and
1525 calculate needed space in the global offset table, procedure
1526 linkage table, and dynamic reloc sections. */
1527
1528 static bfd_boolean
1529 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1530 asection *sec,
1531 const Elf_Internal_Rela *relocs)
1532 {
1533 struct elf_x86_64_link_hash_table *htab;
1534 Elf_Internal_Shdr *symtab_hdr;
1535 struct elf_link_hash_entry **sym_hashes;
1536 const Elf_Internal_Rela *rel;
1537 const Elf_Internal_Rela *rel_end;
1538 asection *sreloc;
1539 bfd_boolean use_plt_got;
1540
1541 if (info->relocatable)
1542 return TRUE;
1543
1544 BFD_ASSERT (is_x86_64_elf (abfd));
1545
1546 htab = elf_x86_64_hash_table (info);
1547 if (htab == NULL)
1548 return FALSE;
1549
1550 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1551
1552 symtab_hdr = &elf_symtab_hdr (abfd);
1553 sym_hashes = elf_sym_hashes (abfd);
1554
1555 sreloc = NULL;
1556
1557 rel_end = relocs + sec->reloc_count;
1558 for (rel = relocs; rel < rel_end; rel++)
1559 {
1560 unsigned int r_type;
1561 unsigned long r_symndx;
1562 struct elf_link_hash_entry *h;
1563 Elf_Internal_Sym *isym;
1564 const char *name;
1565 bfd_boolean size_reloc;
1566
1567 r_symndx = htab->r_sym (rel->r_info);
1568 r_type = ELF32_R_TYPE (rel->r_info);
1569
1570 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1571 {
1572 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1573 abfd, r_symndx);
1574 return FALSE;
1575 }
1576
1577 if (r_symndx < symtab_hdr->sh_info)
1578 {
1579 /* A local symbol. */
1580 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1581 abfd, r_symndx);
1582 if (isym == NULL)
1583 return FALSE;
1584
1585 /* Check relocation against local STT_GNU_IFUNC symbol. */
1586 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1587 {
1588 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1589 TRUE);
1590 if (h == NULL)
1591 return FALSE;
1592
1593 /* Fake a STT_GNU_IFUNC symbol. */
1594 h->type = STT_GNU_IFUNC;
1595 h->def_regular = 1;
1596 h->ref_regular = 1;
1597 h->forced_local = 1;
1598 h->root.type = bfd_link_hash_defined;
1599 }
1600 else
1601 h = NULL;
1602 }
1603 else
1604 {
1605 isym = NULL;
1606 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1607 while (h->root.type == bfd_link_hash_indirect
1608 || h->root.type == bfd_link_hash_warning)
1609 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1610 }
1611
1612 /* Check invalid x32 relocations. */
1613 if (!ABI_64_P (abfd))
1614 switch (r_type)
1615 {
1616 default:
1617 break;
1618
1619 case R_X86_64_DTPOFF64:
1620 case R_X86_64_TPOFF64:
1621 case R_X86_64_PC64:
1622 case R_X86_64_GOTOFF64:
1623 case R_X86_64_GOT64:
1624 case R_X86_64_GOTPCREL64:
1625 case R_X86_64_GOTPC64:
1626 case R_X86_64_GOTPLT64:
1627 case R_X86_64_PLTOFF64:
1628 {
1629 if (h)
1630 name = h->root.root.string;
1631 else
1632 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1633 NULL);
1634 (*_bfd_error_handler)
1635 (_("%B: relocation %s against symbol `%s' isn't "
1636 "supported in x32 mode"), abfd,
1637 x86_64_elf_howto_table[r_type].name, name);
1638 bfd_set_error (bfd_error_bad_value);
1639 return FALSE;
1640 }
1641 break;
1642 }
1643
1644 if (h != NULL)
1645 {
1646 /* Create the ifunc sections for static executables. If we
1647 never see an indirect function symbol nor are we building
1648 a static executable, those sections will be empty and
1649 won't appear in the output. */
1650 switch (r_type)
1651 {
1652 default:
1653 break;
1654
1655 case R_X86_64_PC32_BND:
1656 case R_X86_64_PLT32_BND:
1657 case R_X86_64_PC32:
1658 case R_X86_64_PLT32:
1659 case R_X86_64_32:
1660 case R_X86_64_64:
1661 /* MPX PLT is supported only if elf_x86_64_arch_bed
1662 is used in 64-bit mode. */
1663 if (ABI_64_P (abfd)
1664 && info->bndplt
1665 && (get_elf_x86_64_backend_data (abfd)
1666 == &elf_x86_64_arch_bed))
1667 {
1668 elf_x86_64_hash_entry (h)->has_bnd_reloc = TRUE;
1669
1670 /* Create the second PLT for Intel MPX support. */
1671 if (htab->plt_bnd == NULL)
1672 {
1673 unsigned int plt_bnd_align;
1674 const struct elf_backend_data *bed;
1675
1676 bed = get_elf_backend_data (info->output_bfd);
1677 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1678 && (sizeof (elf_x86_64_bnd_plt2_entry)
1679 == sizeof (elf_x86_64_legacy_plt2_entry)));
1680 plt_bnd_align = 3;
1681
1682 if (htab->elf.dynobj == NULL)
1683 htab->elf.dynobj = abfd;
1684 htab->plt_bnd
1685 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1686 ".plt.bnd",
1687 (bed->dynamic_sec_flags
1688 | SEC_ALLOC
1689 | SEC_CODE
1690 | SEC_LOAD
1691 | SEC_READONLY));
1692 if (htab->plt_bnd == NULL
1693 || !bfd_set_section_alignment (htab->elf.dynobj,
1694 htab->plt_bnd,
1695 plt_bnd_align))
1696 return FALSE;
1697 }
1698 }
1699
1700 case R_X86_64_32S:
1701 case R_X86_64_PC64:
1702 case R_X86_64_GOTPCREL:
1703 case R_X86_64_GOTPCREL64:
1704 if (htab->elf.dynobj == NULL)
1705 htab->elf.dynobj = abfd;
1706 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1707 return FALSE;
1708 break;
1709 }
1710
1711 /* It is referenced by a non-shared object. */
1712 h->ref_regular = 1;
1713 h->root.non_ir_ref = 1;
1714 }
1715
1716 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1717 symtab_hdr, sym_hashes,
1718 &r_type, GOT_UNKNOWN,
1719 rel, rel_end, h, r_symndx))
1720 return FALSE;
1721
1722 switch (r_type)
1723 {
1724 case R_X86_64_TLSLD:
1725 htab->tls_ld_got.refcount += 1;
1726 goto create_got;
1727
1728 case R_X86_64_TPOFF32:
1729 if (!info->executable && ABI_64_P (abfd))
1730 {
1731 if (h)
1732 name = h->root.root.string;
1733 else
1734 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1735 NULL);
1736 (*_bfd_error_handler)
1737 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1738 abfd,
1739 x86_64_elf_howto_table[r_type].name, name);
1740 bfd_set_error (bfd_error_bad_value);
1741 return FALSE;
1742 }
1743 break;
1744
1745 case R_X86_64_GOTTPOFF:
1746 if (!info->executable)
1747 info->flags |= DF_STATIC_TLS;
1748 /* Fall through */
1749
1750 case R_X86_64_GOT32:
1751 case R_X86_64_GOTPCREL:
1752 case R_X86_64_TLSGD:
1753 case R_X86_64_GOT64:
1754 case R_X86_64_GOTPCREL64:
1755 case R_X86_64_GOTPLT64:
1756 case R_X86_64_GOTPC32_TLSDESC:
1757 case R_X86_64_TLSDESC_CALL:
1758 /* This symbol requires a global offset table entry. */
1759 {
1760 int tls_type, old_tls_type;
1761
1762 switch (r_type)
1763 {
1764 default: tls_type = GOT_NORMAL; break;
1765 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1766 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1767 case R_X86_64_GOTPC32_TLSDESC:
1768 case R_X86_64_TLSDESC_CALL:
1769 tls_type = GOT_TLS_GDESC; break;
1770 }
1771
1772 if (h != NULL)
1773 {
1774 h->got.refcount += 1;
1775 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1776 }
1777 else
1778 {
1779 bfd_signed_vma *local_got_refcounts;
1780
1781 /* This is a global offset table entry for a local symbol. */
1782 local_got_refcounts = elf_local_got_refcounts (abfd);
1783 if (local_got_refcounts == NULL)
1784 {
1785 bfd_size_type size;
1786
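/* One allocation holds three consecutive per-local-symbol arrays:
   GOT reference counts, TLSDESC GOT offsets and GOT TLS types.  */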
1787 size = symtab_hdr->sh_info;
1788 size *= sizeof (bfd_signed_vma)
1789 + sizeof (bfd_vma) + sizeof (char);
1790 local_got_refcounts = ((bfd_signed_vma *)
1791 bfd_zalloc (abfd, size));
1792 if (local_got_refcounts == NULL)
1793 return FALSE;
1794 elf_local_got_refcounts (abfd) = local_got_refcounts;
1795 elf_x86_64_local_tlsdesc_gotent (abfd)
1796 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1797 elf_x86_64_local_got_tls_type (abfd)
1798 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1799 }
1800 local_got_refcounts[r_symndx] += 1;
1801 old_tls_type
1802 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1803 }
1804
1805 /* If a TLS symbol is accessed using IE at least once,
1806 there is no point in using a dynamic model for it. */
1807 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1808 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1809 || tls_type != GOT_TLS_IE))
1810 {
1811 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1812 tls_type = old_tls_type;
1813 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1814 && GOT_TLS_GD_ANY_P (tls_type))
1815 tls_type |= old_tls_type;
1816 else
1817 {
1818 if (h)
1819 name = h->root.root.string;
1820 else
1821 name = bfd_elf_sym_name (abfd, symtab_hdr,
1822 isym, NULL);
1823 (*_bfd_error_handler)
1824 (_("%B: '%s' accessed both as normal and thread local symbol"),
1825 abfd, name);
1826 bfd_set_error (bfd_error_bad_value);
1827 return FALSE;
1828 }
1829 }
1830
1831 if (old_tls_type != tls_type)
1832 {
1833 if (h != NULL)
1834 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1835 else
1836 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1837 }
1838 }
1839 /* Fall through */
1840
1841 case R_X86_64_GOTOFF64:
1842 case R_X86_64_GOTPC32:
1843 case R_X86_64_GOTPC64:
1844 create_got:
1845 if (htab->elf.sgot == NULL)
1846 {
1847 if (htab->elf.dynobj == NULL)
1848 htab->elf.dynobj = abfd;
1849 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1850 info))
1851 return FALSE;
1852 }
1853 break;
1854
1855 case R_X86_64_PLT32:
1856 case R_X86_64_PLT32_BND:
1857 /* This symbol requires a procedure linkage table entry. We
1858 actually build the entry in adjust_dynamic_symbol,
1859 because this might be a case of linking PIC code which is
1860 never referenced by a dynamic object, in which case we
1861 don't need to generate a procedure linkage table entry
1862 after all. */
1863
1864 /* If this is a local symbol, we resolve it directly without
1865 creating a procedure linkage table entry. */
1866 if (h == NULL)
1867 continue;
1868
1869 h->needs_plt = 1;
1870 h->plt.refcount += 1;
1871 break;
1872
1873 case R_X86_64_PLTOFF64:
1874 /* This tries to form the 'address' of a function relative
1875 to GOT. For global symbols we need a PLT entry. */
1876 if (h != NULL)
1877 {
1878 h->needs_plt = 1;
1879 h->plt.refcount += 1;
1880 }
1881 goto create_got;
1882
1883 case R_X86_64_SIZE32:
1884 case R_X86_64_SIZE64:
1885 size_reloc = TRUE;
1886 goto do_size;
1887
1888 case R_X86_64_32:
1889 if (!ABI_64_P (abfd))
1890 goto pointer;
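/* Fall through.  */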
1891 case R_X86_64_8:
1892 case R_X86_64_16:
1893 case R_X86_64_32S:
1894 /* Let's help debug shared library creation. These relocs
1895 cannot be used in shared libs. Don't error out for
1896 sections we don't care about, such as debug sections or
1897 non-constant sections. */
1898 if (info->shared
1899 && (sec->flags & SEC_ALLOC) != 0
1900 && (sec->flags & SEC_READONLY) != 0)
1901 {
1902 if (h)
1903 name = h->root.root.string;
1904 else
1905 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1906 (*_bfd_error_handler)
1907 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1908 abfd, x86_64_elf_howto_table[r_type].name, name);
1909 bfd_set_error (bfd_error_bad_value);
1910 return FALSE;
1911 }
1912 /* Fall through. */
1913
1914 case R_X86_64_PC8:
1915 case R_X86_64_PC16:
1916 case R_X86_64_PC32:
1917 case R_X86_64_PC32_BND:
1918 case R_X86_64_PC64:
1919 case R_X86_64_64:
1920 pointer:
1921 if (h != NULL && info->executable)
1922 {
1923 /* If this reloc is in a read-only section, we might
1924 need a copy reloc. We can't check reliably at this
1925 stage whether the section is read-only, as input
1926 sections have not yet been mapped to output sections.
1927 Tentatively set the flag for now, and correct in
1928 adjust_dynamic_symbol. */
1929 h->non_got_ref = 1;
1930
1931 /* We may need a .plt entry if the function this reloc
1932 refers to is in a shared lib. */
1933 h->plt.refcount += 1;
1934 if (r_type != R_X86_64_PC32
1935 && r_type != R_X86_64_PC32_BND
1936 && r_type != R_X86_64_PC64)
1937 h->pointer_equality_needed = 1;
1938 }
1939
1940 size_reloc = FALSE;
1941 do_size:
1942 /* If we are creating a shared library, and this is a reloc
1943 against a global symbol, or a non PC relative reloc
1944 against a local symbol, then we need to copy the reloc
1945 into the shared library. However, if we are linking with
1946 -Bsymbolic, we do not need to copy a reloc against a
1947 global symbol which is defined in an object we are
1948 including in the link (i.e., DEF_REGULAR is set). At
1949 this point we have not seen all the input files, so it is
1950 possible that DEF_REGULAR is not set now but will be set
1951 later (it is never cleared). In case of a weak definition,
1952 DEF_REGULAR may be cleared later by a strong definition in
1953 a shared library. We account for that possibility below by
1954 storing information in the dyn_relocs field of the hash
1955 table entry. A similar situation occurs when creating
1956 shared libraries and symbol visibility changes render the
1957 symbol local.
1958
1959 If, on the other hand, we are creating an executable, we
1960 may need to keep relocations for symbols satisfied by a
1961 dynamic library if we manage to avoid copy relocs for the
1962 symbol. */
1963 if ((info->shared
1964 && (sec->flags & SEC_ALLOC) != 0
1965 && (! IS_X86_64_PCREL_TYPE (r_type)
1966 || (h != NULL
1967 && (! SYMBOLIC_BIND (info, h)
1968 || h->root.type == bfd_link_hash_defweak
1969 || !h->def_regular))))
1970 || (ELIMINATE_COPY_RELOCS
1971 && !info->shared
1972 && (sec->flags & SEC_ALLOC) != 0
1973 && h != NULL
1974 && (h->root.type == bfd_link_hash_defweak
1975 || !h->def_regular)))
1976 {
1977 struct elf_dyn_relocs *p;
1978 struct elf_dyn_relocs **head;
1979
1980 /* We must copy these reloc types into the output file.
1981 Create a reloc section in dynobj and make room for
1982 this reloc. */
1983 if (sreloc == NULL)
1984 {
1985 if (htab->elf.dynobj == NULL)
1986 htab->elf.dynobj = abfd;
1987
1988 sreloc = _bfd_elf_make_dynamic_reloc_section
1989 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1990 abfd, /*rela?*/ TRUE);
1991
1992 if (sreloc == NULL)
1993 return FALSE;
1994 }
1995
1996 /* If this is a global symbol, we count the number of
1997 relocations we need for this symbol. */
1998 if (h != NULL)
1999 {
2000 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2001 }
2002 else
2003 {
2004 /* Track dynamic relocs needed for local syms too.
2005 We really need local syms available to do this
2006 easily. Oh well. */
2007 asection *s;
2008 void **vpp;
2009
2010 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2011 abfd, r_symndx);
2012 if (isym == NULL)
2013 return FALSE;
2014
2015 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2016 if (s == NULL)
2017 s = sec;
2018
2019 /* Beware of type-punned pointers vs. strict aliasing
2020 rules. */
2021 vpp = &(elf_section_data (s)->local_dynrel);
2022 head = (struct elf_dyn_relocs **)vpp;
2023 }
2024
2025 p = *head;
2026 if (p == NULL || p->sec != sec)
2027 {
2028 bfd_size_type amt = sizeof *p;
2029
2030 p = ((struct elf_dyn_relocs *)
2031 bfd_alloc (htab->elf.dynobj, amt));
2032 if (p == NULL)
2033 return FALSE;
2034 p->next = *head;
2035 *head = p;
2036 p->sec = sec;
2037 p->count = 0;
2038 p->pc_count = 0;
2039 }
2040
2041 p->count += 1;
2042 /* Count size relocation as PC-relative relocation. */
2043 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2044 p->pc_count += 1;
2045 }
2046 break;
2047
2048 /* This relocation describes the C++ object vtable hierarchy.
2049 Reconstruct it for later use during GC. */
2050 case R_X86_64_GNU_VTINHERIT:
2051 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2052 return FALSE;
2053 break;
2054
2055 /* This relocation describes which C++ vtable entries are actually
2056 used. Record for later use during GC. */
2057 case R_X86_64_GNU_VTENTRY:
2058 BFD_ASSERT (h != NULL);
2059 if (h != NULL
2060 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2061 return FALSE;
2062 break;
2063
2064 default:
2065 break;
2066 }
2067
2068 if (use_plt_got
2069 && h != NULL
2070 && h->plt.refcount > 0
2071 && h->got.refcount > 0
2072 && htab->plt_got == NULL)
2073 {
2074 /* Create the GOT procedure linkage table. */
2075 unsigned int plt_got_align;
2076 const struct elf_backend_data *bed;
2077
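/* Entries in .plt.got are 8 bytes, so the section is aligned to
   2**3 bytes.  */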
2078 bed = get_elf_backend_data (info->output_bfd);
2079 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2080 && (sizeof (elf_x86_64_bnd_plt2_entry)
2081 == sizeof (elf_x86_64_legacy_plt2_entry)));
2082 plt_got_align = 3;
2083
2084 if (htab->elf.dynobj == NULL)
2085 htab->elf.dynobj = abfd;
2086 htab->plt_got
2087 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2088 ".plt.got",
2089 (bed->dynamic_sec_flags
2090 | SEC_ALLOC
2091 | SEC_CODE
2092 | SEC_LOAD
2093 | SEC_READONLY));
2094 if (htab->plt_got == NULL
2095 || !bfd_set_section_alignment (htab->elf.dynobj,
2096 htab->plt_got,
2097 plt_got_align))
2098 return FALSE;
2099 }
2100 }
2101
2102 return TRUE;
2103 }
2104
2105 /* Return the section that should be marked against GC for a given
2106 relocation. */
2107
2108 static asection *
2109 elf_x86_64_gc_mark_hook (asection *sec,
2110 struct bfd_link_info *info,
2111 Elf_Internal_Rela *rel,
2112 struct elf_link_hash_entry *h,
2113 Elf_Internal_Sym *sym)
2114 {
2115 if (h != NULL)
2116 switch (ELF32_R_TYPE (rel->r_info))
2117 {
2118 case R_X86_64_GNU_VTINHERIT:
2119 case R_X86_64_GNU_VTENTRY:
2120 return NULL;
2121 }
2122
2123 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2124 }
2125
2126 /* Update the got entry reference counts for the section being removed. */
2127
2128 static bfd_boolean
2129 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2130 asection *sec,
2131 const Elf_Internal_Rela *relocs)
2132 {
2133 struct elf_x86_64_link_hash_table *htab;
2134 Elf_Internal_Shdr *symtab_hdr;
2135 struct elf_link_hash_entry **sym_hashes;
2136 bfd_signed_vma *local_got_refcounts;
2137 const Elf_Internal_Rela *rel, *relend;
2138
2139 if (info->relocatable)
2140 return TRUE;
2141
2142 htab = elf_x86_64_hash_table (info);
2143 if (htab == NULL)
2144 return FALSE;
2145
2146 elf_section_data (sec)->local_dynrel = NULL;
2147
2148 symtab_hdr = &elf_symtab_hdr (abfd);
2149 sym_hashes = elf_sym_hashes (abfd);
2150 local_got_refcounts = elf_local_got_refcounts (abfd);
2151
2152 htab = elf_x86_64_hash_table (info);
2153 relend = relocs + sec->reloc_count;
2154 for (rel = relocs; rel < relend; rel++)
2155 {
2156 unsigned long r_symndx;
2157 unsigned int r_type;
2158 struct elf_link_hash_entry *h = NULL;
2159
2160 r_symndx = htab->r_sym (rel->r_info);
2161 if (r_symndx >= symtab_hdr->sh_info)
2162 {
2163 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2164 while (h->root.type == bfd_link_hash_indirect
2165 || h->root.type == bfd_link_hash_warning)
2166 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2167 }
2168 else
2169 {
2170 /* A local symbol. */
2171 Elf_Internal_Sym *isym;
2172
2173 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2174 abfd, r_symndx);
2175
2176 /* Check relocation against local STT_GNU_IFUNC symbol. */
2177 if (isym != NULL
2178 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2179 {
2180 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2181 if (h == NULL)
2182 abort ();
2183 }
2184 }
2185
2186 if (h)
2187 {
2188 struct elf_x86_64_link_hash_entry *eh;
2189 struct elf_dyn_relocs **pp;
2190 struct elf_dyn_relocs *p;
2191
2192 eh = (struct elf_x86_64_link_hash_entry *) h;
2193
2194 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2195 if (p->sec == sec)
2196 {
2197 /* Everything must go for SEC. */
2198 *pp = p->next;
2199 break;
2200 }
2201 }
2202
2203 r_type = ELF32_R_TYPE (rel->r_info);
2204 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2205 symtab_hdr, sym_hashes,
2206 &r_type, GOT_UNKNOWN,
2207 rel, relend, h, r_symndx))
2208 return FALSE;
2209
2210 switch (r_type)
2211 {
2212 case R_X86_64_TLSLD:
2213 if (htab->tls_ld_got.refcount > 0)
2214 htab->tls_ld_got.refcount -= 1;
2215 break;
2216
2217 case R_X86_64_TLSGD:
2218 case R_X86_64_GOTPC32_TLSDESC:
2219 case R_X86_64_TLSDESC_CALL:
2220 case R_X86_64_GOTTPOFF:
2221 case R_X86_64_GOT32:
2222 case R_X86_64_GOTPCREL:
2223 case R_X86_64_GOT64:
2224 case R_X86_64_GOTPCREL64:
2225 case R_X86_64_GOTPLT64:
2226 if (h != NULL)
2227 {
2228 if (h->got.refcount > 0)
2229 h->got.refcount -= 1;
2230 if (h->type == STT_GNU_IFUNC)
2231 {
2232 if (h->plt.refcount > 0)
2233 h->plt.refcount -= 1;
2234 }
2235 }
2236 else if (local_got_refcounts != NULL)
2237 {
2238 if (local_got_refcounts[r_symndx] > 0)
2239 local_got_refcounts[r_symndx] -= 1;
2240 }
2241 break;
2242
2243 case R_X86_64_8:
2244 case R_X86_64_16:
2245 case R_X86_64_32:
2246 case R_X86_64_64:
2247 case R_X86_64_32S:
2248 case R_X86_64_PC8:
2249 case R_X86_64_PC16:
2250 case R_X86_64_PC32:
2251 case R_X86_64_PC32_BND:
2252 case R_X86_64_PC64:
2253 case R_X86_64_SIZE32:
2254 case R_X86_64_SIZE64:
2255 if (info->shared
2256 && (h == NULL || h->type != STT_GNU_IFUNC))
2257 break;
2258 /* Fall thru */
2259
2260 case R_X86_64_PLT32:
2261 case R_X86_64_PLT32_BND:
2262 case R_X86_64_PLTOFF64:
2263 if (h != NULL)
2264 {
2265 if (h->plt.refcount > 0)
2266 h->plt.refcount -= 1;
2267 }
2268 break;
2269
2270 default:
2271 break;
2272 }
2273 }
2274
2275 return TRUE;
2276 }
2277
2278 /* Adjust a symbol defined by a dynamic object and referenced by a
2279 regular object. The current definition is in some section of the
2280 dynamic object, but we're not including those sections. We have to
2281 change the definition to something the rest of the link can
2282 understand. */
2283
2284 static bfd_boolean
2285 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2286 struct elf_link_hash_entry *h)
2287 {
2288 struct elf_x86_64_link_hash_table *htab;
2289 asection *s;
2290 struct elf_x86_64_link_hash_entry *eh;
2291 struct elf_dyn_relocs *p;
2292
2293 /* STT_GNU_IFUNC symbol must go through PLT. */
2294 if (h->type == STT_GNU_IFUNC)
2295 {
2296 /* All local STT_GNU_IFUNC references must be treated as local
2297 calls via local PLT. */
2298 if (h->ref_regular
2299 && SYMBOL_CALLS_LOCAL (info, h))
2300 {
2301 bfd_size_type pc_count = 0, count = 0;
2302 struct elf_dyn_relocs **pp;
2303
2304 eh = (struct elf_x86_64_link_hash_entry *) h;
2305 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2306 {
2307 pc_count += p->pc_count;
2308 p->count -= p->pc_count;
2309 p->pc_count = 0;
2310 count += p->count;
2311 if (p->count == 0)
2312 *pp = p->next;
2313 else
2314 pp = &p->next;
2315 }
2316
2317 if (pc_count || count)
2318 {
2319 h->needs_plt = 1;
2320 h->non_got_ref = 1;
2321 if (h->plt.refcount <= 0)
2322 h->plt.refcount = 1;
2323 else
2324 h->plt.refcount += 1;
2325 }
2326 }
2327
2328 if (h->plt.refcount <= 0)
2329 {
2330 h->plt.offset = (bfd_vma) -1;
2331 h->needs_plt = 0;
2332 }
2333 return TRUE;
2334 }
2335
2336 /* If this is a function, put it in the procedure linkage table. We
2337 will fill in the contents of the procedure linkage table later,
2338 when we know the address of the .got section. */
2339 if (h->type == STT_FUNC
2340 || h->needs_plt)
2341 {
2342 if (h->plt.refcount <= 0
2343 || SYMBOL_CALLS_LOCAL (info, h)
2344 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2345 && h->root.type == bfd_link_hash_undefweak))
2346 {
2347 /* This case can occur if we saw a PLT32 reloc in an input
2348 file, but the symbol was never referred to by a dynamic
2349 object, or if all references were garbage collected. In
2350 such a case, we don't actually need to build a procedure
2351 linkage table, and we can just do a PC32 reloc instead. */
2352 h->plt.offset = (bfd_vma) -1;
2353 h->needs_plt = 0;
2354 }
2355
2356 return TRUE;
2357 }
2358 else
2359 /* It's possible that we incorrectly decided a .plt reloc was
2360 needed for an R_X86_64_PC32 reloc to a non-function sym in
2361 check_relocs. We can't decide accurately between function and
2362 non-function syms in check_relocs; objects loaded later in
2363 the link may change h->type. So fix it now. */
2364 h->plt.offset = (bfd_vma) -1;
2365
2366 /* If this is a weak symbol, and there is a real definition, the
2367 processor independent code will have arranged for us to see the
2368 real definition first, and we can just use the same value. */
2369 if (h->u.weakdef != NULL)
2370 {
2371 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2372 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2373 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2374 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2375 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2376 h->non_got_ref = h->u.weakdef->non_got_ref;
2377 return TRUE;
2378 }
2379
2380 /* This is a reference to a symbol defined by a dynamic object which
2381 is not a function. */
2382
2383 /* If we are creating a shared library, we must presume that the
2384 only references to the symbol are via the global offset table.
2385 For such cases we need not do anything here; the relocations will
2386 be handled correctly by relocate_section. */
2387 if (!info->executable)
2388 return TRUE;
2389
2390 /* If there are no references to this symbol that do not use the
2391 GOT, we don't need to generate a copy reloc. */
2392 if (!h->non_got_ref)
2393 return TRUE;
2394
2395 /* If -z nocopyreloc was given, we won't generate them either. */
2396 if (info->nocopyreloc)
2397 {
2398 h->non_got_ref = 0;
2399 return TRUE;
2400 }
2401
2402 if (ELIMINATE_COPY_RELOCS)
2403 {
2404 eh = (struct elf_x86_64_link_hash_entry *) h;
2405 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2406 {
2407 s = p->sec->output_section;
2408 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2409 break;
2410 }
2411
2412 /* If we didn't find any dynamic relocs in read-only sections, then
2413 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2414 if (p == NULL)
2415 {
2416 h->non_got_ref = 0;
2417 return TRUE;
2418 }
2419 }
2420
2421 /* We must allocate the symbol in our .dynbss section, which will
2422 become part of the .bss section of the executable. There will be
2423 an entry for this symbol in the .dynsym section. The dynamic
2424 object will contain position independent code, so all references
2425 from the dynamic object to this symbol will go through the global
2426 offset table. The dynamic linker will use the .dynsym entry to
2427 determine the address it must put in the global offset table, so
2428 both the dynamic object and the regular object will refer to the
2429 same memory location for the variable. */
2430
2431 htab = elf_x86_64_hash_table (info);
2432 if (htab == NULL)
2433 return FALSE;
2434
2435 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2436 to copy the initial value out of the dynamic object and into the
2437 runtime process image. */
2438 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2439 {
2440 const struct elf_backend_data *bed;
2441 bed = get_elf_backend_data (info->output_bfd);
2442 htab->srelbss->size += bed->s->sizeof_rela;
2443 h->needs_copy = 1;
2444 }
2445
2446 s = htab->sdynbss;
2447
2448 return _bfd_elf_adjust_dynamic_copy (h, s);
2449 }
2450
2451 /* Allocate space in .plt, .got and associated reloc sections for
2452 dynamic relocs. */
2453
2454 static bfd_boolean
2455 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2456 {
2457 struct bfd_link_info *info;
2458 struct elf_x86_64_link_hash_table *htab;
2459 struct elf_x86_64_link_hash_entry *eh;
2460 struct elf_dyn_relocs *p;
2461 const struct elf_backend_data *bed;
2462 unsigned int plt_entry_size;
2463
2464 if (h->root.type == bfd_link_hash_indirect)
2465 return TRUE;
2466
2467 eh = (struct elf_x86_64_link_hash_entry *) h;
2468
2469 info = (struct bfd_link_info *) inf;
2470 htab = elf_x86_64_hash_table (info);
2471 if (htab == NULL)
2472 return FALSE;
2473 bed = get_elf_backend_data (info->output_bfd);
2474 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2475
2476 /* We can't use the GOT PLT if pointer equality is needed since
2477 finish_dynamic_symbol won't clear symbol value and the dynamic
2478 linker won't update the GOT slot. We will get into an infinite
2479 loop at run-time. */
2480 if (htab->plt_got != NULL
2481 && h->type != STT_GNU_IFUNC
2482 && !h->pointer_equality_needed
2483 && h->plt.refcount > 0
2484 && h->got.refcount > 0)
2485 {
2486 /* Don't use the regular PLT if there are both GOT and GOTPLT
2487 relocations. */
2488 h->plt.offset = (bfd_vma) -1;
2489
2490 /* Use the GOT PLT. */
2491 eh->plt_got.refcount = 1;
2492 }
2493
2494 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2495 here if it is defined and referenced in a non-shared object. */
2496 if (h->type == STT_GNU_IFUNC
2497 && h->def_regular)
2498 {
2499 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2500 &eh->dyn_relocs,
2501 plt_entry_size,
2502 plt_entry_size,
2503 GOT_ENTRY_SIZE))
2504 {
2505 asection *s = htab->plt_bnd;
2506 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2507 {
2508 /* Use the .plt.bnd section if it is created. */
2509 eh->plt_bnd.offset = s->size;
2510
2511 /* Make room for this entry in the .plt.bnd section. */
2512 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2513 }
2514
2515 return TRUE;
2516 }
2517 else
2518 return FALSE;
2519 }
2520 else if (htab->elf.dynamic_sections_created
2521 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2522 {
2523 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2524
2525 /* Make sure this symbol is output as a dynamic symbol.
2526 Undefined weak syms won't yet be marked as dynamic. */
2527 if (h->dynindx == -1
2528 && !h->forced_local)
2529 {
2530 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2531 return FALSE;
2532 }
2533
2534 if (info->shared
2535 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2536 {
2537 asection *s = htab->elf.splt;
2538 asection *bnd_s = htab->plt_bnd;
2539 asection *got_s = htab->plt_got;
2540
2541 /* If this is the first .plt entry, make room for the special
2542 first entry. */
2543 if (s->size == 0)
2544 s->size = plt_entry_size;
2545
2546 if (use_plt_got)
2547 eh->plt_got.offset = got_s->size;
2548 else
2549 {
2550 h->plt.offset = s->size;
2551 if (bnd_s)
2552 eh->plt_bnd.offset = bnd_s->size;
2553 }
2554
2555 /* If this symbol is not defined in a regular file, and we are
2556 not generating a shared library, then set the symbol to this
2557 location in the .plt. This is required to make function
2558 pointers compare as equal between the normal executable and
2559 the shared library. */
2560 if (! info->shared
2561 && !h->def_regular)
2562 {
2563 if (use_plt_got)
2564 {
2565 /* We need to make a call to the entry of the GOT PLT
2566 instead of regular PLT entry. */
2567 h->root.u.def.section = got_s;
2568 h->root.u.def.value = eh->plt_got.offset;
2569 }
2570 else
2571 {
2572 if (bnd_s)
2573 {
2574 /* We need to make a call to the entry of the second
2575 PLT instead of regular PLT entry. */
2576 h->root.u.def.section = bnd_s;
2577 h->root.u.def.value = eh->plt_bnd.offset;
2578 }
2579 else
2580 {
2581 h->root.u.def.section = s;
2582 h->root.u.def.value = h->plt.offset;
2583 }
2584 }
2585 }
2586
2587 /* Make room for this entry. */
2588 if (use_plt_got)
2589 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2590 else
2591 {
2592 s->size += plt_entry_size;
2593 if (bnd_s)
2594 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2595
2596 /* We also need to make an entry in the .got.plt section,
2597 which will be placed in the .got section by the linker
2598 script. */
2599 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2600
2601 /* We also need to make an entry in the .rela.plt
2602 section. */
2603 htab->elf.srelplt->size += bed->s->sizeof_rela;
2604 htab->elf.srelplt->reloc_count++;
2605 }
2606 }
2607 else
2608 {
2609 h->plt.offset = (bfd_vma) -1;
2610 h->needs_plt = 0;
2611 }
2612 }
2613 else
2614 {
2615 h->plt.offset = (bfd_vma) -1;
2616 h->needs_plt = 0;
2617 }
2618
2619 eh->tlsdesc_got = (bfd_vma) -1;
2620
2621 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2622 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2623 if (h->got.refcount > 0
2624 && info->executable
2625 && h->dynindx == -1
2626 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2627 {
2628 h->got.offset = (bfd_vma) -1;
2629 }
2630 else if (h->got.refcount > 0)
2631 {
2632 asection *s;
2633 bfd_boolean dyn;
2634 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2635
2636 /* Make sure this symbol is output as a dynamic symbol.
2637 Undefined weak syms won't yet be marked as dynamic. */
2638 if (h->dynindx == -1
2639 && !h->forced_local)
2640 {
2641 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2642 return FALSE;
2643 }
2644
2645 if (GOT_TLS_GDESC_P (tls_type))
2646 {
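/* A TLS descriptor occupies two consecutive entries in .got.plt;
   got.offset is set to -2 as a marker and is overwritten below if
   a regular GOT entry is also needed.  */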
2647 eh->tlsdesc_got = htab->elf.sgotplt->size
2648 - elf_x86_64_compute_jump_table_size (htab);
2649 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2650 h->got.offset = (bfd_vma) -2;
2651 }
2652 if (! GOT_TLS_GDESC_P (tls_type)
2653 || GOT_TLS_GD_P (tls_type))
2654 {
2655 s = htab->elf.sgot;
2656 h->got.offset = s->size;
2657 s->size += GOT_ENTRY_SIZE;
2658 if (GOT_TLS_GD_P (tls_type))
2659 s->size += GOT_ENTRY_SIZE;
2660 }
2661 dyn = htab->elf.dynamic_sections_created;
2662 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2663 and two if global.
2664 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2665 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2666 || tls_type == GOT_TLS_IE)
2667 htab->elf.srelgot->size += bed->s->sizeof_rela;
2668 else if (GOT_TLS_GD_P (tls_type))
2669 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2670 else if (! GOT_TLS_GDESC_P (tls_type)
2671 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2672 || h->root.type != bfd_link_hash_undefweak)
2673 && (info->shared
2674 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2675 htab->elf.srelgot->size += bed->s->sizeof_rela;
2676 if (GOT_TLS_GDESC_P (tls_type))
2677 {
2678 htab->elf.srelplt->size += bed->s->sizeof_rela;
2679 htab->tlsdesc_plt = (bfd_vma) -1;
2680 }
2681 }
2682 else
2683 h->got.offset = (bfd_vma) -1;
2684
2685 if (eh->dyn_relocs == NULL)
2686 return TRUE;
2687
2688 /* In the shared -Bsymbolic case, discard space allocated for
2689 dynamic pc-relative relocs against symbols which turn out to be
2690 defined in regular objects. For the normal shared case, discard
2691 space for pc-relative relocs that have become local due to symbol
2692 visibility changes. */
2693
2694 if (info->shared)
2695 {
2696 /* Relocs that use pc_count are those that appear on a call
2697 insn, or certain REL relocs that can be generated via assembly.
2698 We want calls to protected symbols to resolve directly to the
2699 function rather than going via the plt. If people want
2700 function pointer comparisons to work as expected then they
2701 should avoid writing weird assembly. */
2702 if (SYMBOL_CALLS_LOCAL (info, h))
2703 {
2704 struct elf_dyn_relocs **pp;
2705
2706 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2707 {
2708 p->count -= p->pc_count;
2709 p->pc_count = 0;
2710 if (p->count == 0)
2711 *pp = p->next;
2712 else
2713 pp = &p->next;
2714 }
2715 }
2716
2717 /* Also discard relocs on undefined weak syms with non-default
2718 visibility. */
2719 if (eh->dyn_relocs != NULL)
2720 {
2721 if (h->root.type == bfd_link_hash_undefweak)
2722 {
2723 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2724 eh->dyn_relocs = NULL;
2725
2726 /* Make sure undefined weak symbols are output as dynamic
2727 symbols in PIEs. */
2728 else if (h->dynindx == -1
2729 && ! h->forced_local
2730 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2731 return FALSE;
2732 }
2733 /* For PIE, discard space for relocs against symbols which
2734 turn out to need copy relocs. */
2735 else if (info->executable
2736 && h->needs_copy
2737 && h->def_dynamic
2738 && !h->def_regular)
2739 eh->dyn_relocs = NULL;
2740 }
2741 }
2742 else if (ELIMINATE_COPY_RELOCS)
2743 {
2744 /* For the non-shared case, discard space for relocs against
2745 symbols which turn out to need copy relocs or are not
2746 dynamic. */
2747
2748 if (!h->non_got_ref
2749 && ((h->def_dynamic
2750 && !h->def_regular)
2751 || (htab->elf.dynamic_sections_created
2752 && (h->root.type == bfd_link_hash_undefweak
2753 || h->root.type == bfd_link_hash_undefined))))
2754 {
2755 /* Make sure this symbol is output as a dynamic symbol.
2756 Undefined weak syms won't yet be marked as dynamic. */
2757 if (h->dynindx == -1
2758 && ! h->forced_local
2759 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2760 return FALSE;
2761
2762 /* If that succeeded, we know we'll be keeping all the
2763 relocs. */
2764 if (h->dynindx != -1)
2765 goto keep;
2766 }
2767
2768 eh->dyn_relocs = NULL;
2769
2770 keep: ;
2771 }
2772
2773 /* Finally, allocate space. */
2774 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2775 {
2776 asection * sreloc;
2777
2778 sreloc = elf_section_data (p->sec)->sreloc;
2779
2780 BFD_ASSERT (sreloc != NULL);
2781
2782 sreloc->size += p->count * bed->s->sizeof_rela;
2783 }
2784
2785 return TRUE;
2786 }
2787
2788 /* Allocate space in .plt, .got and associated reloc sections for
2789 local dynamic relocs. */
2790
2791 static bfd_boolean
2792 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2793 {
2794 struct elf_link_hash_entry *h
2795 = (struct elf_link_hash_entry *) *slot;
2796
2797 if (h->type != STT_GNU_IFUNC
2798 || !h->def_regular
2799 || !h->ref_regular
2800 || !h->forced_local
2801 || h->root.type != bfd_link_hash_defined)
2802 abort ();
2803
2804 return elf_x86_64_allocate_dynrelocs (h, inf);
2805 }
2806
2807 /* Find any dynamic relocs that apply to read-only sections. */
2808
2809 static bfd_boolean
2810 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2811 void * inf)
2812 {
2813 struct elf_x86_64_link_hash_entry *eh;
2814 struct elf_dyn_relocs *p;
2815
2816 /* Skip local IFUNC symbols. */
2817 if (h->forced_local && h->type == STT_GNU_IFUNC)
2818 return TRUE;
2819
2820 eh = (struct elf_x86_64_link_hash_entry *) h;
2821 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2822 {
2823 asection *s = p->sec->output_section;
2824
2825 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2826 {
2827 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2828
2829 info->flags |= DF_TEXTREL;
2830
2831 if (info->warn_shared_textrel && info->shared)
2832 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2833 p->sec->owner, h->root.root.string,
2834 p->sec);
2835
2836 /* Not an error, just cut short the traversal. */
2837 return FALSE;
2838 }
2839 }
2840 return TRUE;
2841 }
2842
2843 /* Convert
2844 mov foo@GOTPCREL(%rip), %reg
2845 to
2846 lea foo(%rip), %reg
2847 with the local symbol, foo. */
2848
2849 static bfd_boolean
2850 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2851 struct bfd_link_info *link_info)
2852 {
2853 Elf_Internal_Shdr *symtab_hdr;
2854 Elf_Internal_Rela *internal_relocs;
2855 Elf_Internal_Rela *irel, *irelend;
2856 bfd_byte *contents;
2857 struct elf_x86_64_link_hash_table *htab;
2858 bfd_boolean changed_contents;
2859 bfd_boolean changed_relocs;
2860 bfd_signed_vma *local_got_refcounts;
2861
2862 /* Don't even try to convert non-ELF outputs. */
2863 if (!is_elf_hash_table (link_info->hash))
2864 return FALSE;
2865
2866 /* Nothing to do if there is no code, no relocations or no output. */
2867 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2868 || sec->reloc_count == 0
2869 || bfd_is_abs_section (sec->output_section))
2870 return TRUE;
2871
2872 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2873
2874 /* Load the relocations for this section. */
2875 internal_relocs = (_bfd_elf_link_read_relocs
2876 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2877 link_info->keep_memory));
2878 if (internal_relocs == NULL)
2879 return FALSE;
2880
2881 htab = elf_x86_64_hash_table (link_info);
2882 changed_contents = FALSE;
2883 changed_relocs = FALSE;
2884 local_got_refcounts = elf_local_got_refcounts (abfd);
2885
2886 /* Get the section contents. */
2887 if (elf_section_data (sec)->this_hdr.contents != NULL)
2888 contents = elf_section_data (sec)->this_hdr.contents;
2889 else
2890 {
2891 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2892 goto error_return;
2893 }
2894
2895 irelend = internal_relocs + sec->reloc_count;
2896 for (irel = internal_relocs; irel < irelend; irel++)
2897 {
2898 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2899 unsigned int r_symndx = htab->r_sym (irel->r_info);
2900 unsigned int indx;
2901 struct elf_link_hash_entry *h;
2902
2903 if (r_type != R_X86_64_GOTPCREL)
2904 continue;
2905
2906 /* Get the symbol referred to by the reloc. */
2907 if (r_symndx < symtab_hdr->sh_info)
2908 {
2909 Elf_Internal_Sym *isym;
2910
2911 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2912 abfd, r_symndx);
2913
2914 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2915 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2916 && irel->r_offset >= 2
2917 && bfd_get_8 (input_bfd,
2918 contents + irel->r_offset - 2) == 0x8b)
2919 {
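/* Rewrite the mov opcode byte (0x8b) to lea (0x8d) and retarget
   the relocation from the GOT slot to the symbol itself as a
   plain PC-relative reference.  */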
2920 bfd_put_8 (output_bfd, 0x8d,
2921 contents + irel->r_offset - 2);
2922 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2923 if (local_got_refcounts != NULL
2924 && local_got_refcounts[r_symndx] > 0)
2925 local_got_refcounts[r_symndx] -= 1;
2926 changed_contents = TRUE;
2927 changed_relocs = TRUE;
2928 }
2929 continue;
2930 }
2931
2932 indx = r_symndx - symtab_hdr->sh_info;
2933 h = elf_sym_hashes (abfd)[indx];
2934 BFD_ASSERT (h != NULL);
2935
2936 while (h->root.type == bfd_link_hash_indirect
2937 || h->root.type == bfd_link_hash_warning)
2938 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2939
2940 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2941 avoid optimizing _DYNAMIC since ld.so may use its link-time
2942 address. */
2943 if (h->def_regular
2944 && h->type != STT_GNU_IFUNC
2945 && h != htab->elf.hdynamic
2946 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2947 && irel->r_offset >= 2
2948 && bfd_get_8 (input_bfd,
2949 contents + irel->r_offset - 2) == 0x8b)
2950 {
2951 bfd_put_8 (output_bfd, 0x8d,
2952 contents + irel->r_offset - 2);
2953 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2954 if (h->got.refcount > 0)
2955 h->got.refcount -= 1;
2956 changed_contents = TRUE;
2957 changed_relocs = TRUE;
2958 }
2959 }
2960
2961 if (contents != NULL
2962 && elf_section_data (sec)->this_hdr.contents != contents)
2963 {
2964 if (!changed_contents && !link_info->keep_memory)
2965 free (contents);
2966 else
2967 {
2968 /* Cache the section contents for elf_link_input_bfd. */
2969 elf_section_data (sec)->this_hdr.contents = contents;
2970 }
2971 }
2972
2973 if (elf_section_data (sec)->relocs != internal_relocs)
2974 {
2975 if (!changed_relocs)
2976 free (internal_relocs);
2977 else
2978 elf_section_data (sec)->relocs = internal_relocs;
2979 }
2980
2981 return TRUE;
2982
2983 error_return:
2984 if (contents != NULL
2985 && elf_section_data (sec)->this_hdr.contents != contents)
2986 free (contents);
2987 if (internal_relocs != NULL
2988 && elf_section_data (sec)->relocs != internal_relocs)
2989 free (internal_relocs);
2990 return FALSE;
2991 }
2992
2993 /* Set the sizes of the dynamic sections. */
2994
2995 static bfd_boolean
2996 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
2997 struct bfd_link_info *info)
2998 {
2999 struct elf_x86_64_link_hash_table *htab;
3000 bfd *dynobj;
3001 asection *s;
3002 bfd_boolean relocs;
3003 bfd *ibfd;
3004 const struct elf_backend_data *bed;
3005
3006 htab = elf_x86_64_hash_table (info);
3007 if (htab == NULL)
3008 return FALSE;
3009 bed = get_elf_backend_data (output_bfd);
3010
3011 dynobj = htab->elf.dynobj;
3012 if (dynobj == NULL)
3013 abort ();
3014
3015 if (htab->elf.dynamic_sections_created)
3016 {
3017 /* Set the contents of the .interp section to the interpreter. */
3018 if (info->executable)
3019 {
3020 s = bfd_get_linker_section (dynobj, ".interp");
3021 if (s == NULL)
3022 abort ();
3023 s->size = htab->dynamic_interpreter_size;
3024 s->contents = (unsigned char *) htab->dynamic_interpreter;
3025 }
3026 }
3027
3028 /* Set up .got offsets for local syms, and space for local dynamic
3029 relocs. */
3030 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3031 {
3032 bfd_signed_vma *local_got;
3033 bfd_signed_vma *end_local_got;
3034 char *local_tls_type;
3035 bfd_vma *local_tlsdesc_gotent;
3036 bfd_size_type locsymcount;
3037 Elf_Internal_Shdr *symtab_hdr;
3038 asection *srel;
3039
3040 if (! is_x86_64_elf (ibfd))
3041 continue;
3042
3043 for (s = ibfd->sections; s != NULL; s = s->next)
3044 {
3045 struct elf_dyn_relocs *p;
3046
3047 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3048 return FALSE;
3049
3050 for (p = (struct elf_dyn_relocs *)
3051 (elf_section_data (s)->local_dynrel);
3052 p != NULL;
3053 p = p->next)
3054 {
3055 if (!bfd_is_abs_section (p->sec)
3056 && bfd_is_abs_section (p->sec->output_section))
3057 {
3058 /* Input section has been discarded, either because
3059 it is a copy of a linkonce section or due to
3060 linker script /DISCARD/, so we'll be discarding
3061 the relocs too. */
3062 }
3063 else if (p->count != 0)
3064 {
3065 srel = elf_section_data (p->sec)->sreloc;
3066 srel->size += p->count * bed->s->sizeof_rela;
3067 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3068 && (info->flags & DF_TEXTREL) == 0)
3069 {
3070 info->flags |= DF_TEXTREL;
3071 if (info->warn_shared_textrel && info->shared)
3072 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
3073 p->sec->owner, p->sec);
3074 }
3075 }
3076 }
3077 }
3078
3079 local_got = elf_local_got_refcounts (ibfd);
3080 if (!local_got)
3081 continue;
3082
3083 symtab_hdr = &elf_symtab_hdr (ibfd);
3084 locsymcount = symtab_hdr->sh_info;
3085 end_local_got = local_got + locsymcount;
3086 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3087 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3088 s = htab->elf.sgot;
3089 srel = htab->elf.srelgot;
3090 for (; local_got < end_local_got;
3091 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3092 {
3093 *local_tlsdesc_gotent = (bfd_vma) -1;
3094 if (*local_got > 0)
3095 {
3096 if (GOT_TLS_GDESC_P (*local_tls_type))
3097 {
3098 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3099 - elf_x86_64_compute_jump_table_size (htab);
3100 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3101 *local_got = (bfd_vma) -2;
3102 }
3103 if (! GOT_TLS_GDESC_P (*local_tls_type)
3104 || GOT_TLS_GD_P (*local_tls_type))
3105 {
3106 *local_got = s->size;
3107 s->size += GOT_ENTRY_SIZE;
3108 if (GOT_TLS_GD_P (*local_tls_type))
3109 s->size += GOT_ENTRY_SIZE;
3110 }
3111 if (info->shared
3112 || GOT_TLS_GD_ANY_P (*local_tls_type)
3113 || *local_tls_type == GOT_TLS_IE)
3114 {
3115 if (GOT_TLS_GDESC_P (*local_tls_type))
3116 {
3117 htab->elf.srelplt->size
3118 += bed->s->sizeof_rela;
3119 htab->tlsdesc_plt = (bfd_vma) -1;
3120 }
3121 if (! GOT_TLS_GDESC_P (*local_tls_type)
3122 || GOT_TLS_GD_P (*local_tls_type))
3123 srel->size += bed->s->sizeof_rela;
3124 }
3125 }
3126 else
3127 *local_got = (bfd_vma) -1;
3128 }
3129 }
3130
3131 if (htab->tls_ld_got.refcount > 0)
3132 {
3133 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3134 relocs. */
3135 htab->tls_ld_got.offset = htab->elf.sgot->size;
3136 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3137 htab->elf.srelgot->size += bed->s->sizeof_rela;
3138 }
3139 else
3140 htab->tls_ld_got.offset = -1;
3141
3142 /* Allocate global sym .plt and .got entries, and space for global
3143 sym dynamic relocs. */
3144 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3145 info);
3146
3147 /* Allocate .plt and .got entries, and space for local symbols. */
3148 htab_traverse (htab->loc_hash_table,
3149 elf_x86_64_allocate_local_dynrelocs,
3150 info);
3151
3152 /* For every jump slot reserved in the sgotplt, reloc_count is
3153 incremented. However, when we reserve space for TLS descriptors,
3154 it's not incremented, so in order to compute the space reserved
3155 for them, it suffices to multiply the reloc count by the jump
3156 slot size.
3157
3158 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3159 so that R_X86_64_IRELATIVE entries come last. */
3160 if (htab->elf.srelplt)
3161 {
3162 htab->sgotplt_jump_table_size
3163 = elf_x86_64_compute_jump_table_size (htab);
3164 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3165 }
3166 else if (htab->elf.irelplt)
3167 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3168
3169 if (htab->tlsdesc_plt)
3170 {
3171 /* If we're not using lazy TLS relocations, don't generate the
3172 PLT and GOT entries they require. */
3173 if ((info->flags & DF_BIND_NOW))
3174 htab->tlsdesc_plt = 0;
3175 else
3176 {
3177 htab->tlsdesc_got = htab->elf.sgot->size;
3178 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3179 /* Reserve room for the initial entry.
3180 FIXME: we could probably do away with it in this case. */
3181 if (htab->elf.splt->size == 0)
3182 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3183 htab->tlsdesc_plt = htab->elf.splt->size;
3184 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3185 }
3186 }
3187
3188 if (htab->elf.sgotplt)
3189 {
3190 /* Don't allocate the .got.plt section if there are neither GOT nor PLT
3191 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3192 if ((htab->elf.hgot == NULL
3193 || !htab->elf.hgot->ref_regular_nonweak)
3194 && (htab->elf.sgotplt->size
3195 == get_elf_backend_data (output_bfd)->got_header_size)
3196 && (htab->elf.splt == NULL
3197 || htab->elf.splt->size == 0)
3198 && (htab->elf.sgot == NULL
3199 || htab->elf.sgot->size == 0)
3200 && (htab->elf.iplt == NULL
3201 || htab->elf.iplt->size == 0)
3202 && (htab->elf.igotplt == NULL
3203 || htab->elf.igotplt->size == 0))
3204 htab->elf.sgotplt->size = 0;
3205 }
3206
3207 if (htab->plt_eh_frame != NULL
3208 && htab->elf.splt != NULL
3209 && htab->elf.splt->size != 0
3210 && !bfd_is_abs_section (htab->elf.splt->output_section)
3211 && _bfd_elf_eh_frame_present (info))
3212 {
3213 const struct elf_x86_64_backend_data *arch_data
3214 = get_elf_x86_64_arch_data (bed);
3215 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3216 }
3217
3218 /* We now have determined the sizes of the various dynamic sections.
3219 Allocate memory for them. */
3220 relocs = FALSE;
3221 for (s = dynobj->sections; s != NULL; s = s->next)
3222 {
3223 if ((s->flags & SEC_LINKER_CREATED) == 0)
3224 continue;
3225
3226 if (s == htab->elf.splt
3227 || s == htab->elf.sgot
3228 || s == htab->elf.sgotplt
3229 || s == htab->elf.iplt
3230 || s == htab->elf.igotplt
3231 || s == htab->plt_bnd
3232 || s == htab->plt_got
3233 || s == htab->plt_eh_frame
3234 || s == htab->sdynbss)
3235 {
3236 /* Strip this section if we don't need it; see the
3237 comment below. */
3238 }
3239 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3240 {
3241 if (s->size != 0 && s != htab->elf.srelplt)
3242 relocs = TRUE;
3243
3244 /* We use the reloc_count field as a counter if we need
3245 to copy relocs into the output file. */
3246 if (s != htab->elf.srelplt)
3247 s->reloc_count = 0;
3248 }
3249 else
3250 {
3251 /* It's not one of our sections, so don't allocate space. */
3252 continue;
3253 }
3254
3255 if (s->size == 0)
3256 {
3257 /* If we don't need this section, strip it from the
3258 output file. This is mostly to handle .rela.bss and
3259 .rela.plt. We must create both sections in
3260 create_dynamic_sections, because they must be created
3261 before the linker maps input sections to output
3262 sections. The linker does that before
3263 adjust_dynamic_symbol is called, and it is that
3264 function which decides whether anything needs to go
3265 into these sections. */
3266
3267 s->flags |= SEC_EXCLUDE;
3268 continue;
3269 }
3270
3271 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3272 continue;
3273
3274 /* Allocate memory for the section contents. We use bfd_zalloc
3275 here in case unused entries are not reclaimed before the
3276 section's contents are written out. This should not happen,
3277 but this way if it does, we get a R_X86_64_NONE reloc instead
3278 of garbage. */
3279 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3280 if (s->contents == NULL)
3281 return FALSE;
3282 }
3283
3284 if (htab->plt_eh_frame != NULL
3285 && htab->plt_eh_frame->contents != NULL)
3286 {
3287 const struct elf_x86_64_backend_data *arch_data
3288 = get_elf_x86_64_arch_data (bed);
3289
3290 memcpy (htab->plt_eh_frame->contents,
3291 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
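/* Patch the FDE's address-range field with the final .plt size
   now that it is known.  */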
3292 bfd_put_32 (dynobj, htab->elf.splt->size,
3293 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3294 }
3295
3296 if (htab->elf.dynamic_sections_created)
3297 {
3298 /* Add some entries to the .dynamic section. We fill in the
3299 values later, in elf_x86_64_finish_dynamic_sections, but we
3300 must add the entries now so that we get the correct size for
3301 the .dynamic section. The DT_DEBUG entry is filled in by the
3302 dynamic linker and used by the debugger. */
3303 #define add_dynamic_entry(TAG, VAL) \
3304 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3305
3306 if (info->executable)
3307 {
3308 if (!add_dynamic_entry (DT_DEBUG, 0))
3309 return FALSE;
3310 }
3311
3312 if (htab->elf.splt->size != 0)
3313 {
3314 if (!add_dynamic_entry (DT_PLTGOT, 0)
3315 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3316 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3317 || !add_dynamic_entry (DT_JMPREL, 0))
3318 return FALSE;
3319
3320 if (htab->tlsdesc_plt
3321 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3322 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3323 return FALSE;
3324 }
3325
3326 if (relocs)
3327 {
3328 if (!add_dynamic_entry (DT_RELA, 0)
3329 || !add_dynamic_entry (DT_RELASZ, 0)
3330 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3331 return FALSE;
3332
3333 /* If any dynamic relocs apply to a read-only section,
3334 then we need a DT_TEXTREL entry. */
3335 if ((info->flags & DF_TEXTREL) == 0)
3336 elf_link_hash_traverse (&htab->elf,
3337 elf_x86_64_readonly_dynrelocs,
3338 info);
3339
3340 if ((info->flags & DF_TEXTREL) != 0)
3341 {
3342 if (!add_dynamic_entry (DT_TEXTREL, 0))
3343 return FALSE;
3344 }
3345 }
3346 }
3347 #undef add_dynamic_entry
3348
3349 return TRUE;
3350 }
3351
3352 static bfd_boolean
3353 elf_x86_64_always_size_sections (bfd *output_bfd,
3354 struct bfd_link_info *info)
3355 {
3356 asection *tls_sec = elf_hash_table (info)->tls_sec;
3357
3358 if (tls_sec)
3359 {
3360 struct elf_link_hash_entry *tlsbase;
3361
3362 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3363 "_TLS_MODULE_BASE_",
3364 FALSE, FALSE, FALSE);
3365
3366 if (tlsbase && tlsbase->type == STT_TLS)
3367 {
3368 struct elf_x86_64_link_hash_table *htab;
3369 struct bfd_link_hash_entry *bh = NULL;
3370 const struct elf_backend_data *bed
3371 = get_elf_backend_data (output_bfd);
3372
3373 htab = elf_x86_64_hash_table (info);
3374 if (htab == NULL)
3375 return FALSE;
3376
3377 if (!(_bfd_generic_link_add_one_symbol
3378 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3379 tls_sec, 0, NULL, FALSE,
3380 bed->collect, &bh)))
3381 return FALSE;
3382
3383 htab->tls_module_base = bh;
3384
3385 tlsbase = (struct elf_link_hash_entry *)bh;
3386 tlsbase->def_regular = 1;
3387 tlsbase->other = STV_HIDDEN;
3388 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3389 }
3390 }
3391
3392 return TRUE;
3393 }
3394
3395 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3396 executables. Rather than setting it to the beginning of the TLS
3397 section, we have to set it to the end. This function may be called
3398 multiple times; it is idempotent. */
3399
3400 static void
3401 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3402 {
3403 struct elf_x86_64_link_hash_table *htab;
3404 struct bfd_link_hash_entry *base;
3405
3406 if (!info->executable)
3407 return;
3408
3409 htab = elf_x86_64_hash_table (info);
3410 if (htab == NULL)
3411 return;
3412
3413 base = htab->tls_module_base;
3414 if (base == NULL)
3415 return;
3416
3417 base->u.def.value = htab->elf.tls_size;
3418 }
3419
3420 /* Return the base VMA address which should be subtracted from real addresses
3421 when resolving @dtpoff relocation.
3422 This is PT_TLS segment p_vaddr. */
3423
3424 static bfd_vma
3425 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3426 {
3427 /* If tls_sec is NULL, we should have signalled an error already. */
3428 if (elf_hash_table (info)->tls_sec == NULL)
3429 return 0;
3430 return elf_hash_table (info)->tls_sec->vma;
3431 }
3432
3433 /* Return the relocation value for @tpoff relocation
3434 if STT_TLS virtual address is ADDRESS. */
3435
3436 static bfd_vma
3437 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3438 {
3439 struct elf_link_hash_table *htab = elf_hash_table (info);
3440 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3441 bfd_vma static_tls_size;
3442
3443 /* If tls_sec is NULL, we should have signalled an error already. */
3444 if (htab->tls_sec == NULL)
3445 return 0;
3446
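/* x86-64 uses TLS variant II: the static TLS block sits just below
   the thread pointer, so @tpoff values are negative offsets.  */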
3447 /* Consider special static TLS alignment requirements. */
3448 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3449 return address - static_tls_size - htab->tls_sec->vma;
3450 }
3451
3452 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3453 branch? */
3454
3455 static bfd_boolean
3456 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3457 {
3458 /* Opcode Instruction
3459 0xe8 call
3460 0xe9 jump
3461 0x0f 0x8x conditional jump */
3462 return ((offset > 0
3463 && (contents [offset - 1] == 0xe8
3464 || contents [offset - 1] == 0xe9))
3465 || (offset > 1
3466 && contents [offset - 2] == 0x0f
3467 && (contents [offset - 1] & 0xf0) == 0x80));
3468 }
3469
3470 /* Relocate an x86_64 ELF section. */
3471
3472 static bfd_boolean
3473 elf_x86_64_relocate_section (bfd *output_bfd,
3474 struct bfd_link_info *info,
3475 bfd *input_bfd,
3476 asection *input_section,
3477 bfd_byte *contents,
3478 Elf_Internal_Rela *relocs,
3479 Elf_Internal_Sym *local_syms,
3480 asection **local_sections)
3481 {
3482 struct elf_x86_64_link_hash_table *htab;
3483 Elf_Internal_Shdr *symtab_hdr;
3484 struct elf_link_hash_entry **sym_hashes;
3485 bfd_vma *local_got_offsets;
3486 bfd_vma *local_tlsdesc_gotents;
3487 Elf_Internal_Rela *rel;
3488 Elf_Internal_Rela *relend;
3489 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3490
3491 BFD_ASSERT (is_x86_64_elf (input_bfd));
3492
3493 htab = elf_x86_64_hash_table (info);
3494 if (htab == NULL)
3495 return FALSE;
3496 symtab_hdr = &elf_symtab_hdr (input_bfd);
3497 sym_hashes = elf_sym_hashes (input_bfd);
3498 local_got_offsets = elf_local_got_offsets (input_bfd);
3499 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3500
3501 elf_x86_64_set_tls_module_base (info);
3502
3503 rel = relocs;
3504 relend = relocs + input_section->reloc_count;
3505 for (; rel < relend; rel++)
3506 {
3507 unsigned int r_type;
3508 reloc_howto_type *howto;
3509 unsigned long r_symndx;
3510 struct elf_link_hash_entry *h;
3511 struct elf_x86_64_link_hash_entry *eh;
3512 Elf_Internal_Sym *sym;
3513 asection *sec;
3514 bfd_vma off, offplt, plt_offset;
3515 bfd_vma relocation;
3516 bfd_boolean unresolved_reloc;
3517 bfd_reloc_status_type r;
3518 int tls_type;
3519 asection *base_got, *resolved_plt;
3520 bfd_vma st_size;
3521
3522 r_type = ELF32_R_TYPE (rel->r_info);
3523 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3524 || r_type == (int) R_X86_64_GNU_VTENTRY)
3525 continue;
3526
3527 if (r_type >= (int) R_X86_64_standard)
3528 {
3529 (*_bfd_error_handler)
3530 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3531 input_bfd, input_section, r_type);
3532 bfd_set_error (bfd_error_bad_value);
3533 return FALSE;
3534 }
3535
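/* For x32, R_X86_64_32 uses the dedicated howto kept in the last
   slot of the table.  */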
3536 if (r_type != (int) R_X86_64_32
3537 || ABI_64_P (output_bfd))
3538 howto = x86_64_elf_howto_table + r_type;
3539 else
3540 howto = (x86_64_elf_howto_table
3541 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3542 r_symndx = htab->r_sym (rel->r_info);
3543 h = NULL;
3544 sym = NULL;
3545 sec = NULL;
3546 unresolved_reloc = FALSE;
3547 if (r_symndx < symtab_hdr->sh_info)
3548 {
3549 sym = local_syms + r_symndx;
3550 sec = local_sections[r_symndx];
3551
3552 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3553 &sec, rel);
3554 st_size = sym->st_size;
3555
3556 /* Relocate against local STT_GNU_IFUNC symbol. */
3557 if (!info->relocatable
3558 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3559 {
3560 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3561 rel, FALSE);
3562 if (h == NULL)
3563 abort ();
3564
3565 /* Set STT_GNU_IFUNC symbol value. */
3566 h->root.u.def.value = sym->st_value;
3567 h->root.u.def.section = sec;
3568 }
3569 }
3570 else
3571 {
3572 bfd_boolean warned ATTRIBUTE_UNUSED;
3573 bfd_boolean ignored ATTRIBUTE_UNUSED;
3574
3575 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3576 r_symndx, symtab_hdr, sym_hashes,
3577 h, sec, relocation,
3578 unresolved_reloc, warned, ignored);
3579 st_size = h->size;
3580 }
3581
3582 if (sec != NULL && discarded_section (sec))
3583 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3584 rel, 1, relend, howto, 0, contents);
3585
3586 if (info->relocatable)
3587 continue;
3588
3589 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3590 {
3591 if (r_type == R_X86_64_64)
3592 {
3593 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3594 zero-extend it to 64bit if addend is zero. */
3595 r_type = R_X86_64_32;
3596 memset (contents + rel->r_offset + 4, 0, 4);
3597 }
3598 else if (r_type == R_X86_64_SIZE64)
3599 {
3600 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3601 zero-extend it to 64bit if addend is zero. */
3602 r_type = R_X86_64_SIZE32;
3603 memset (contents + rel->r_offset + 4, 0, 4);
3604 }
3605 }
3606
3607 eh = (struct elf_x86_64_link_hash_entry *) h;
3608
3609 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3610 it here if it is defined in a non-shared object. */
3611 if (h != NULL
3612 && h->type == STT_GNU_IFUNC
3613 && h->def_regular)
3614 {
3615 bfd_vma plt_index;
3616 const char *name;
3617
3618 if ((input_section->flags & SEC_ALLOC) == 0
3619 || h->plt.offset == (bfd_vma) -1)
3620 abort ();
3621
3622 /* STT_GNU_IFUNC symbol must go through PLT. */
3623 if (htab->elf.splt != NULL)
3624 {
3625 if (htab->plt_bnd != NULL)
3626 {
3627 resolved_plt = htab->plt_bnd;
3628 plt_offset = eh->plt_bnd.offset;
3629 }
3630 else
3631 {
3632 resolved_plt = htab->elf.splt;
3633 plt_offset = h->plt.offset;
3634 }
3635 }
3636 else
3637 {
3638 resolved_plt = htab->elf.iplt;
3639 plt_offset = h->plt.offset;
3640 }
3641
3642 relocation = (resolved_plt->output_section->vma
3643 + resolved_plt->output_offset + plt_offset);
3644
3645 switch (r_type)
3646 {
3647 default:
3648 if (h->root.root.string)
3649 name = h->root.root.string;
3650 else
3651 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3652 NULL);
3653 (*_bfd_error_handler)
3654 (_("%B: relocation %s against STT_GNU_IFUNC "
3655 "symbol `%s' isn't handled by %s"), input_bfd,
3656 x86_64_elf_howto_table[r_type].name,
3657 name, __FUNCTION__);
3658 bfd_set_error (bfd_error_bad_value);
3659 return FALSE;
3660
3661 case R_X86_64_32S:
3662 if (info->shared)
3663 abort ();
3664 goto do_relocation;
3665
3666 case R_X86_64_32:
3667 if (ABI_64_P (output_bfd))
3668 goto do_relocation;
3669 /* FALLTHROUGH */
3670 case R_X86_64_64:
3671 if (rel->r_addend != 0)
3672 {
3673 if (h->root.root.string)
3674 name = h->root.root.string;
3675 else
3676 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3677 sym, NULL);
3678 (*_bfd_error_handler)
3679 (_("%B: relocation %s against STT_GNU_IFUNC "
3680 "symbol `%s' has non-zero addend: %d"),
3681 input_bfd, x86_64_elf_howto_table[r_type].name,
3682 name, rel->r_addend);
3683 bfd_set_error (bfd_error_bad_value);
3684 return FALSE;
3685 }
3686
3687 /* Generate a dynamic relocation only when there is a
3688 non-GOT reference in a shared object. */
3689 if (info->shared && h->non_got_ref)
3690 {
3691 Elf_Internal_Rela outrel;
3692 asection *sreloc;
3693
3694 /* Need a dynamic relocation to get the real function
3695 address. */
3696 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3697 info,
3698 input_section,
3699 rel->r_offset);
3700 if (outrel.r_offset == (bfd_vma) -1
3701 || outrel.r_offset == (bfd_vma) -2)
3702 abort ();
3703
3704 outrel.r_offset += (input_section->output_section->vma
3705 + input_section->output_offset);
3706
3707 if (h->dynindx == -1
3708 || h->forced_local
3709 || info->executable)
3710 {
3711 /* This symbol is resolved locally. */
3712 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3713 outrel.r_addend = (h->root.u.def.value
3714 + h->root.u.def.section->output_section->vma
3715 + h->root.u.def.section->output_offset);
3716 }
3717 else
3718 {
3719 outrel.r_info = htab->r_info (h->dynindx, r_type);
3720 outrel.r_addend = 0;
3721 }
3722
3723 sreloc = htab->elf.irelifunc;
3724 elf_append_rela (output_bfd, sreloc, &outrel);
3725
3726 /* If this reloc is against an external symbol, we
3727 do not want to fiddle with the addend. Otherwise,
3728 we need to include the symbol value so that it
3729 becomes an addend for the dynamic reloc. For an
3730 internal symbol, we have already updated the addend. */
3731 continue;
3732 }
3733 /* FALLTHROUGH */
3734 case R_X86_64_PC32:
3735 case R_X86_64_PC32_BND:
3736 case R_X86_64_PC64:
3737 case R_X86_64_PLT32:
3738 case R_X86_64_PLT32_BND:
3739 goto do_relocation;
3740
3741 case R_X86_64_GOTPCREL:
3742 case R_X86_64_GOTPCREL64:
3743 base_got = htab->elf.sgot;
3744 off = h->got.offset;
3745
3746 if (base_got == NULL)
3747 abort ();
3748
3749 if (off == (bfd_vma) -1)
3750 {
3751 /* We can't use h->got.offset here to save state, or
3752 even just remember the offset, as finish_dynamic_symbol
3753 would use that as offset into .got. */
3754
3755 if (htab->elf.splt != NULL)
3756 {
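/* The first .plt entry is reserved, and the first three .got.plt
   entries are reserved for the dynamic linker, hence the -1 and
   + 3 adjustments. */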
3757 plt_index = h->plt.offset / plt_entry_size - 1;
3758 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3759 base_got = htab->elf.sgotplt;
3760 }
3761 else
3762 {
3763 plt_index = h->plt.offset / plt_entry_size;
3764 off = plt_index * GOT_ENTRY_SIZE;
3765 base_got = htab->elf.igotplt;
3766 }
3767
3768 if (h->dynindx == -1
3769 || h->forced_local
3770 || info->symbolic)
3771 {
3772 /* This references the local definition. We must
3773 initialize this entry in the global offset table.
3774 Since the offset must always be a multiple of 8,
3775 we use the least significant bit to record
3776 whether we have initialized it already.
3777
3778 When doing a dynamic link, we create a .rela.got
3779 relocation entry to initialize the value. This
3780 is done in the finish_dynamic_symbol routine. */
3781 if ((off & 1) != 0)
3782 off &= ~1;
3783 else
3784 {
3785 bfd_put_64 (output_bfd, relocation,
3786 base_got->contents + off);
3787 /* Note that this is harmless for the GOTPLT64
3788 case, as -1 | 1 still is -1. */
3789 h->got.offset |= 1;
3790 }
3791 }
3792 }
3793
3794 relocation = (base_got->output_section->vma
3795 + base_got->output_offset + off);
3796
3797 goto do_relocation;
3798 }
3799 }
3800
3801 /* When generating a shared object, the relocations handled here are
3802 copied into the output file to be resolved at run time. */
3803 switch (r_type)
3804 {
3805 case R_X86_64_GOT32:
3806 case R_X86_64_GOT64:
3807 /* Relocation is to the entry for this symbol in the global
3808 offset table. */
3809 case R_X86_64_GOTPCREL:
3810 case R_X86_64_GOTPCREL64:
3811 /* Use global offset table entry as symbol value. */
3812 case R_X86_64_GOTPLT64:
3813 /* This is obsolete and treated the same as GOT64. */
3814 base_got = htab->elf.sgot;
3815
3816 if (htab->elf.sgot == NULL)
3817 abort ();
3818
3819 if (h != NULL)
3820 {
3821 bfd_boolean dyn;
3822
3823 off = h->got.offset;
3824 if (h->needs_plt
3825 && h->plt.offset != (bfd_vma)-1
3826 && off == (bfd_vma)-1)
3827 {
3828 /* We can't use h->got.offset here to save
3829 state, or even just remember the offset, as
3830 finish_dynamic_symbol would use that as offset into
3831 .got. */
3832 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3833 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3834 base_got = htab->elf.sgotplt;
3835 }
3836
3837 dyn = htab->elf.dynamic_sections_created;
3838
3839 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3840 || (info->shared
3841 && SYMBOL_REFERENCES_LOCAL (info, h))
3842 || (ELF_ST_VISIBILITY (h->other)
3843 && h->root.type == bfd_link_hash_undefweak))
3844 {
3845 /* This is actually a static link, or it is a -Bsymbolic
3846 link and the symbol is defined locally, or the symbol
3847 was forced to be local because of a version file. We
3848 must initialize this entry in the global offset table.
3849 Since the offset must always be a multiple of 8, we
3850 use the least significant bit to record whether we
3851 have initialized it already.
3852
3853 When doing a dynamic link, we create a .rela.got
3854 relocation entry to initialize the value. This is
3855 done in the finish_dynamic_symbol routine. */
3856 if ((off & 1) != 0)
3857 off &= ~1;
3858 else
3859 {
3860 bfd_put_64 (output_bfd, relocation,
3861 base_got->contents + off);
3862 /* Note that this is harmless for the GOTPLT64 case,
3863 as -1 | 1 still is -1. */
3864 h->got.offset |= 1;
3865 }
3866 }
3867 else
3868 unresolved_reloc = FALSE;
3869 }
3870 else
3871 {
3872 if (local_got_offsets == NULL)
3873 abort ();
3874
3875 off = local_got_offsets[r_symndx];
3876
3877 /* The offset must always be a multiple of 8. We use
3878 the least significant bit to record whether we have
3879 already generated the necessary reloc. */
3880 if ((off & 1) != 0)
3881 off &= ~1;
3882 else
3883 {
3884 bfd_put_64 (output_bfd, relocation,
3885 base_got->contents + off);
3886
3887 if (info->shared)
3888 {
3889 asection *s;
3890 Elf_Internal_Rela outrel;
3891
3892 /* We need to generate a R_X86_64_RELATIVE reloc
3893 for the dynamic linker. */
3894 s = htab->elf.srelgot;
3895 if (s == NULL)
3896 abort ();
3897
3898 outrel.r_offset = (base_got->output_section->vma
3899 + base_got->output_offset
3900 + off);
3901 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3902 outrel.r_addend = relocation;
3903 elf_append_rela (output_bfd, s, &outrel);
3904 }
3905
3906 local_got_offsets[r_symndx] |= 1;
3907 }
3908 }
3909
3910 if (off >= (bfd_vma) -2)
3911 abort ();
3912
3913 relocation = base_got->output_section->vma
3914 + base_got->output_offset + off;
3915 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3916 relocation -= htab->elf.sgotplt->output_section->vma
3917 - htab->elf.sgotplt->output_offset;
3918
3919 break;
3920
3921 case R_X86_64_GOTOFF64:
3922 /* Relocation is relative to the start of the global offset
3923 table. */
3924
3925 /* Check to make sure it isn't a protected function symbol
3926 for a shared library, since it may not be local when used
3927 as a function address. */
3928 if (!info->executable
3929 && h
3930 && !SYMBOLIC_BIND (info, h)
3931 && h->def_regular
3932 && h->type == STT_FUNC
3933 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3934 {
3935 (*_bfd_error_handler)
3936 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3937 input_bfd, h->root.root.string);
3938 bfd_set_error (bfd_error_bad_value);
3939 return FALSE;
3940 }
3941
3942 /* Note that sgot is not involved in this
3943 calculation. We always want the start of .got.plt. If we
3944 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3945 permitted by the ABI, we might have to change this
3946 calculation. */
3947 relocation -= htab->elf.sgotplt->output_section->vma
3948 + htab->elf.sgotplt->output_offset;
3949 break;
3950
3951 case R_X86_64_GOTPC32:
3952 case R_X86_64_GOTPC64:
3953 /* Use global offset table as symbol value. */
3954 relocation = htab->elf.sgotplt->output_section->vma
3955 + htab->elf.sgotplt->output_offset;
3956 unresolved_reloc = FALSE;
3957 break;
3958
3959 case R_X86_64_PLTOFF64:
3960 /* Relocation is PLT entry relative to GOT. For local
3961 symbols it's the symbol itself relative to GOT. */
3962 if (h != NULL
3963 /* See PLT32 handling. */
3964 && h->plt.offset != (bfd_vma) -1
3965 && htab->elf.splt != NULL)
3966 {
3967 if (htab->plt_bnd != NULL)
3968 {
3969 resolved_plt = htab->plt_bnd;
3970 plt_offset = eh->plt_bnd.offset;
3971 }
3972 else
3973 {
3974 resolved_plt = htab->elf.splt;
3975 plt_offset = h->plt.offset;
3976 }
3977
3978 relocation = (resolved_plt->output_section->vma
3979 + resolved_plt->output_offset
3980 + plt_offset);
3981 unresolved_reloc = FALSE;
3982 }
3983
3984 relocation -= htab->elf.sgotplt->output_section->vma
3985 + htab->elf.sgotplt->output_offset;
3986 break;
3987
3988 case R_X86_64_PLT32:
3989 case R_X86_64_PLT32_BND:
3990 /* Relocation is to the entry for this symbol in the
3991 procedure linkage table. */
3992
3993 /* Resolve a PLT32 reloc against a local symbol directly,
3994 without using the procedure linkage table. */
3995 if (h == NULL)
3996 break;
3997
3998 if ((h->plt.offset == (bfd_vma) -1
3999 && eh->plt_got.offset == (bfd_vma) -1)
4000 || htab->elf.splt == NULL)
4001 {
4002 /* We didn't make a PLT entry for this symbol. This
4003 happens when statically linking PIC code, or when
4004 using -Bsymbolic. */
4005 break;
4006 }
4007
4008 if (h->plt.offset != (bfd_vma) -1)
4009 {
4010 if (htab->plt_bnd != NULL)
4011 {
4012 resolved_plt = htab->plt_bnd;
4013 plt_offset = eh->plt_bnd.offset;
4014 }
4015 else
4016 {
4017 resolved_plt = htab->elf.splt;
4018 plt_offset = h->plt.offset;
4019 }
4020 }
4021 else
4022 {
4023 /* Use the GOT PLT. */
4024 resolved_plt = htab->plt_got;
4025 plt_offset = eh->plt_got.offset;
4026 }
4027
4028 relocation = (resolved_plt->output_section->vma
4029 + resolved_plt->output_offset
4030 + plt_offset);
4031 unresolved_reloc = FALSE;
4032 break;
4033
4034 case R_X86_64_SIZE32:
4035 case R_X86_64_SIZE64:
4036 /* Set to symbol size. */
4037 relocation = st_size;
4038 goto direct;
4039
4040 case R_X86_64_PC8:
4041 case R_X86_64_PC16:
4042 case R_X86_64_PC32:
4043 case R_X86_64_PC32_BND:
4044 if (info->shared
4045 && (input_section->flags & SEC_ALLOC) != 0
4046 && (input_section->flags & SEC_READONLY) != 0
4047 && h != NULL)
4048 {
4049 bfd_boolean fail = FALSE;
4050 bfd_boolean branch
4051 = ((r_type == R_X86_64_PC32
4052 || r_type == R_X86_64_PC32_BND)
4053 && is_32bit_relative_branch (contents, rel->r_offset));
4054
4055 if (SYMBOL_REFERENCES_LOCAL (info, h))
4056 {
4057 /* Symbol is referenced locally. Make sure it is either
4058 defined locally or a branch. */
4059 fail = !h->def_regular && !branch;
4060 }
4061 else if (!(info->executable && h->needs_copy))
4062 {
4063 /* Symbol doesn't need copy reloc and isn't referenced
4064 locally. We only allow a branch to a symbol with
4065 non-default visibility. */
4066 fail = (!branch
4067 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4068 }
4069
4070 if (fail)
4071 {
4072 const char *fmt;
4073 const char *v;
4074 const char *pic = "";
4075
4076 switch (ELF_ST_VISIBILITY (h->other))
4077 {
4078 case STV_HIDDEN:
4079 v = _("hidden symbol");
4080 break;
4081 case STV_INTERNAL:
4082 v = _("internal symbol");
4083 break;
4084 case STV_PROTECTED:
4085 v = _("protected symbol");
4086 break;
4087 default:
4088 v = _("symbol");
4089 pic = _("; recompile with -fPIC");
4090 break;
4091 }
4092
4093 if (h->def_regular)
4094 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4095 else
4096 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4097
4098 (*_bfd_error_handler) (fmt, input_bfd,
4099 x86_64_elf_howto_table[r_type].name,
4100 v, h->root.root.string, pic);
4101 bfd_set_error (bfd_error_bad_value);
4102 return FALSE;
4103 }
4104 }
4105 /* Fall through. */
4106
4107 case R_X86_64_8:
4108 case R_X86_64_16:
4109 case R_X86_64_32:
4110 case R_X86_64_PC64:
4111 case R_X86_64_64:
4112 /* FIXME: The ABI says the linker should make sure the value is
4113 the same when it's zero-extended to 64 bits. */
4114
4115 direct:
4116 if ((input_section->flags & SEC_ALLOC) == 0)
4117 break;
4118
4119 /* Don't copy a pc-relative relocation into the output file
4120 if the symbol needs copy reloc. */
4121 if ((info->shared
4122 && !(info->executable
4123 && h != NULL
4124 && h->needs_copy
4125 && IS_X86_64_PCREL_TYPE (r_type))
4126 && (h == NULL
4127 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4128 || h->root.type != bfd_link_hash_undefweak)
4129 && ((! IS_X86_64_PCREL_TYPE (r_type)
4130 && r_type != R_X86_64_SIZE32
4131 && r_type != R_X86_64_SIZE64)
4132 || ! SYMBOL_CALLS_LOCAL (info, h)))
4133 || (ELIMINATE_COPY_RELOCS
4134 && !info->shared
4135 && h != NULL
4136 && h->dynindx != -1
4137 && !h->non_got_ref
4138 && ((h->def_dynamic
4139 && !h->def_regular)
4140 || h->root.type == bfd_link_hash_undefweak
4141 || h->root.type == bfd_link_hash_undefined)))
4142 {
4143 Elf_Internal_Rela outrel;
4144 bfd_boolean skip, relocate;
4145 asection *sreloc;
4146
4147 /* When generating a shared object, these relocations
4148 are copied into the output file to be resolved at run
4149 time. */
4150 skip = FALSE;
4151 relocate = FALSE;
4152
4153 outrel.r_offset =
4154 _bfd_elf_section_offset (output_bfd, info, input_section,
4155 rel->r_offset);
4156 if (outrel.r_offset == (bfd_vma) -1)
4157 skip = TRUE;
4158 else if (outrel.r_offset == (bfd_vma) -2)
4159 skip = TRUE, relocate = TRUE;
4160
4161 outrel.r_offset += (input_section->output_section->vma
4162 + input_section->output_offset);
4163
4164 if (skip)
4165 memset (&outrel, 0, sizeof outrel);
4166
4167 /* h->dynindx may be -1 if this symbol was marked to
4168 become local. */
4169 else if (h != NULL
4170 && h->dynindx != -1
4171 && (IS_X86_64_PCREL_TYPE (r_type)
4172 || ! info->shared
4173 || ! SYMBOLIC_BIND (info, h)
4174 || ! h->def_regular))
4175 {
4176 outrel.r_info = htab->r_info (h->dynindx, r_type);
4177 outrel.r_addend = rel->r_addend;
4178 }
4179 else
4180 {
4181 /* This symbol is local, or marked to become local. */
4182 if (r_type == htab->pointer_r_type)
4183 {
4184 relocate = TRUE;
4185 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4186 outrel.r_addend = relocation + rel->r_addend;
4187 }
4188 else if (r_type == R_X86_64_64
4189 && !ABI_64_P (output_bfd))
4190 {
4191 relocate = TRUE;
4192 outrel.r_info = htab->r_info (0,
4193 R_X86_64_RELATIVE64);
4194 outrel.r_addend = relocation + rel->r_addend;
4195 /* Check addend overflow. */
4196 if ((outrel.r_addend & 0x80000000)
4197 != (rel->r_addend & 0x80000000))
4198 {
4199 const char *name;
4200 int addend = rel->r_addend;
4201 if (h && h->root.root.string)
4202 name = h->root.root.string;
4203 else
4204 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4205 sym, NULL);
4206 if (addend < 0)
4207 (*_bfd_error_handler)
4208 (_("%B: addend -0x%x in relocation %s against "
4209 "symbol `%s' at 0x%lx in section `%A' is "
4210 "out of range"),
4211 input_bfd, input_section, addend,
4212 x86_64_elf_howto_table[r_type].name,
4213 name, (unsigned long) rel->r_offset);
4214 else
4215 (*_bfd_error_handler)
4216 (_("%B: addend 0x%x in relocation %s against "
4217 "symbol `%s' at 0x%lx in section `%A' is "
4218 "out of range"),
4219 input_bfd, input_section, addend,
4220 x86_64_elf_howto_table[r_type].name,
4221 name, (unsigned long) rel->r_offset);
4222 bfd_set_error (bfd_error_bad_value);
4223 return FALSE;
4224 }
4225 }
4226 else
4227 {
4228 long sindx;
4229
4230 if (bfd_is_abs_section (sec))
4231 sindx = 0;
4232 else if (sec == NULL || sec->owner == NULL)
4233 {
4234 bfd_set_error (bfd_error_bad_value);
4235 return FALSE;
4236 }
4237 else
4238 {
4239 asection *osec;
4240
4241 /* We are turning this relocation into one
4242 against a section symbol. It would be
4243 proper to subtract the symbol's value,
4244 osec->vma, from the emitted reloc addend,
4245 but ld.so expects buggy relocs. */
4246 osec = sec->output_section;
4247 sindx = elf_section_data (osec)->dynindx;
4248 if (sindx == 0)
4249 {
4250 asection *oi = htab->elf.text_index_section;
4251 sindx = elf_section_data (oi)->dynindx;
4252 }
4253 BFD_ASSERT (sindx != 0);
4254 }
4255
4256 outrel.r_info = htab->r_info (sindx, r_type);
4257 outrel.r_addend = relocation + rel->r_addend;
4258 }
4259 }
4260
4261 sreloc = elf_section_data (input_section)->sreloc;
4262
4263 if (sreloc == NULL || sreloc->contents == NULL)
4264 {
4265 r = bfd_reloc_notsupported;
4266 goto check_relocation_error;
4267 }
4268
4269 elf_append_rela (output_bfd, sreloc, &outrel);
4270
4271 /* If this reloc is against an external symbol, we do
4272 not want to fiddle with the addend. Otherwise, we
4273 need to include the symbol value so that it becomes
4274 an addend for the dynamic reloc. */
4275 if (! relocate)
4276 continue;
4277 }
4278
4279 break;
4280
4281 case R_X86_64_TLSGD:
4282 case R_X86_64_GOTPC32_TLSDESC:
4283 case R_X86_64_TLSDESC_CALL:
4284 case R_X86_64_GOTTPOFF:
4285 tls_type = GOT_UNKNOWN;
4286 if (h == NULL && local_got_offsets)
4287 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4288 else if (h != NULL)
4289 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4290
4291 if (! elf_x86_64_tls_transition (info, input_bfd,
4292 input_section, contents,
4293 symtab_hdr, sym_hashes,
4294 &r_type, tls_type, rel,
4295 relend, h, r_symndx))
4296 return FALSE;
4297
4298 if (r_type == R_X86_64_TPOFF32)
4299 {
4300 bfd_vma roff = rel->r_offset;
4301
4302 BFD_ASSERT (! unresolved_reloc);
4303
4304 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4305 {
4306 /* GD->LE transition. For 64bit, change
4307 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4308 .word 0x6666; rex64; call __tls_get_addr
4309 into:
4310 movq %fs:0, %rax
4311 leaq foo@tpoff(%rax), %rax
4312 For 32bit, change
4313 leaq foo@tlsgd(%rip), %rdi
4314 .word 0x6666; rex64; call __tls_get_addr
4315 into:
4316 movl %fs:0, %eax
4317 leaq foo@tpoff(%rax), %rax
4318 For largepic, change:
4319 leaq foo@tlsgd(%rip), %rdi
4320 movabsq $__tls_get_addr@pltoff, %rax
4321 addq %rbx, %rax
4322 call *%rax
4323 into:
4324 movq %fs:0, %rax
4325 leaq foo@tpoff(%rax), %rax
4326 nopw 0x0(%rax,%rax,1) */
4327 int largepic = 0;
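/* A 0xb8 opcode byte at roff + 5 (the movabsq loading the address
   of __tls_get_addr) identifies the large-model sequence. */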
4328 if (ABI_64_P (output_bfd)
4329 && contents[roff + 5] == (bfd_byte) '\xb8')
4330 {
4331 memcpy (contents + roff - 3,
4332 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4333 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4334 largepic = 1;
4335 }
4336 else if (ABI_64_P (output_bfd))
4337 memcpy (contents + roff - 4,
4338 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4339 16);
4340 else
4341 memcpy (contents + roff - 3,
4342 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4343 15);
4344 bfd_put_32 (output_bfd,
4345 elf_x86_64_tpoff (info, relocation),
4346 contents + roff + 8 + largepic);
4347 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4348 rel++;
4349 continue;
4350 }
4351 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4352 {
4353 /* GDesc -> LE transition.
4354 It's originally something like:
4355 leaq x@tlsdesc(%rip), %rax
4356
4357 Change it to:
4358 movl $x@tpoff, %rax. */
4359
4360 unsigned int val, type;
4361
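/* TYPE is the REX byte and VAL the ModRM byte of the original leaq.
   Keep REX.W, move the old REX.R bit into REX.B, switch the opcode
   to 0xc7 (mov imm32) and move the destination register into the
   r/m field of ModRM. */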
4362 type = bfd_get_8 (input_bfd, contents + roff - 3);
4363 val = bfd_get_8 (input_bfd, contents + roff - 1);
4364 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4365 contents + roff - 3);
4366 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4367 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4368 contents + roff - 1);
4369 bfd_put_32 (output_bfd,
4370 elf_x86_64_tpoff (info, relocation),
4371 contents + roff);
4372 continue;
4373 }
4374 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4375 {
4376 /* GDesc -> LE transition.
4377 It's originally:
4378 call *(%rax)
4379 Turn it into:
4380 xchg %ax,%ax. */
4381 bfd_put_8 (output_bfd, 0x66, contents + roff);
4382 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4383 continue;
4384 }
4385 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4386 {
4387 /* IE->LE transition:
4388 For 64bit, originally it can be one of:
4389 movq foo@gottpoff(%rip), %reg
4390 addq foo@gottpoff(%rip), %reg
4391 We change it into:
4392 movq $foo, %reg
4393 leaq foo(%reg), %reg
4394 addq $foo, %reg.
4395 For 32bit, originally it can be one of:
4396 movq foo@gottpoff(%rip), %reg
4397 addl foo@gottpoff(%rip), %reg
4398 We change it into:
4399 movq $foo, %reg
4400 leal foo(%reg), %reg
4401 addl $foo, %reg. */
4402
4403 unsigned int val, type, reg;
4404
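/* VAL is the REX prefix (if any), TYPE the opcode byte (0x8b for mov,
   otherwise add) and REG the reg field of the ModRM byte, i.e. the
   destination register of the original instruction. */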
4405 if (roff >= 3)
4406 val = bfd_get_8 (input_bfd, contents + roff - 3);
4407 else
4408 val = 0;
4409 type = bfd_get_8 (input_bfd, contents + roff - 2);
4410 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4411 reg >>= 3;
4412 if (type == 0x8b)
4413 {
4414 /* movq */
4415 if (val == 0x4c)
4416 bfd_put_8 (output_bfd, 0x49,
4417 contents + roff - 3);
4418 else if (!ABI_64_P (output_bfd) && val == 0x44)
4419 bfd_put_8 (output_bfd, 0x41,
4420 contents + roff - 3);
4421 bfd_put_8 (output_bfd, 0xc7,
4422 contents + roff - 2);
4423 bfd_put_8 (output_bfd, 0xc0 | reg,
4424 contents + roff - 1);
4425 }
4426 else if (reg == 4)
4427 {
4428 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4429 is special */
4430 if (val == 0x4c)
4431 bfd_put_8 (output_bfd, 0x49,
4432 contents + roff - 3);
4433 else if (!ABI_64_P (output_bfd) && val == 0x44)
4434 bfd_put_8 (output_bfd, 0x41,
4435 contents + roff - 3);
4436 bfd_put_8 (output_bfd, 0x81,
4437 contents + roff - 2);
4438 bfd_put_8 (output_bfd, 0xc0 | reg,
4439 contents + roff - 1);
4440 }
4441 else
4442 {
4443 /* addq/addl -> leaq/leal */
4444 if (val == 0x4c)
4445 bfd_put_8 (output_bfd, 0x4d,
4446 contents + roff - 3);
4447 else if (!ABI_64_P (output_bfd) && val == 0x44)
4448 bfd_put_8 (output_bfd, 0x45,
4449 contents + roff - 3);
4450 bfd_put_8 (output_bfd, 0x8d,
4451 contents + roff - 2);
4452 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4453 contents + roff - 1);
4454 }
4455 bfd_put_32 (output_bfd,
4456 elf_x86_64_tpoff (info, relocation),
4457 contents + roff);
4458 continue;
4459 }
4460 else
4461 BFD_ASSERT (FALSE);
4462 }
4463
4464 if (htab->elf.sgot == NULL)
4465 abort ();
4466
4467 if (h != NULL)
4468 {
4469 off = h->got.offset;
4470 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4471 }
4472 else
4473 {
4474 if (local_got_offsets == NULL)
4475 abort ();
4476
4477 off = local_got_offsets[r_symndx];
4478 offplt = local_tlsdesc_gotents[r_symndx];
4479 }
4480
4481 if ((off & 1) != 0)
4482 off &= ~1;
4483 else
4484 {
4485 Elf_Internal_Rela outrel;
4486 int dr_type, indx;
4487 asection *sreloc;
4488
4489 if (htab->elf.srelgot == NULL)
4490 abort ();
4491
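/* A dynamic symbol index of 0 means the symbol resolves locally. */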
4492 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4493
4494 if (GOT_TLS_GDESC_P (tls_type))
4495 {
4496 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4497 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4498 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4499 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4500 + htab->elf.sgotplt->output_offset
4501 + offplt
4502 + htab->sgotplt_jump_table_size);
4503 sreloc = htab->elf.srelplt;
4504 if (indx == 0)
4505 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4506 else
4507 outrel.r_addend = 0;
4508 elf_append_rela (output_bfd, sreloc, &outrel);
4509 }
4510
4511 sreloc = htab->elf.srelgot;
4512
4513 outrel.r_offset = (htab->elf.sgot->output_section->vma
4514 + htab->elf.sgot->output_offset + off);
4515
4516 if (GOT_TLS_GD_P (tls_type))
4517 dr_type = R_X86_64_DTPMOD64;
4518 else if (GOT_TLS_GDESC_P (tls_type))
4519 goto dr_done;
4520 else
4521 dr_type = R_X86_64_TPOFF64;
4522
4523 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4524 outrel.r_addend = 0;
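/* When the symbol resolves locally (indx == 0), the addend is its
   offset from the start of the TLS segment. */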
4525 if ((dr_type == R_X86_64_TPOFF64
4526 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4527 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4528 outrel.r_info = htab->r_info (indx, dr_type);
4529
4530 elf_append_rela (output_bfd, sreloc, &outrel);
4531
4532 if (GOT_TLS_GD_P (tls_type))
4533 {
4534 if (indx == 0)
4535 {
4536 BFD_ASSERT (! unresolved_reloc);
4537 bfd_put_64 (output_bfd,
4538 relocation - elf_x86_64_dtpoff_base (info),
4539 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4540 }
4541 else
4542 {
4543 bfd_put_64 (output_bfd, 0,
4544 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4545 outrel.r_info = htab->r_info (indx,
4546 R_X86_64_DTPOFF64);
4547 outrel.r_offset += GOT_ENTRY_SIZE;
4548 elf_append_rela (output_bfd, sreloc,
4549 &outrel);
4550 }
4551 }
4552
4553 dr_done:
4554 if (h != NULL)
4555 h->got.offset |= 1;
4556 else
4557 local_got_offsets[r_symndx] |= 1;
4558 }
4559
4560 if (off >= (bfd_vma) -2
4561 && ! GOT_TLS_GDESC_P (tls_type))
4562 abort ();
4563 if (r_type == ELF32_R_TYPE (rel->r_info))
4564 {
4565 if (r_type == R_X86_64_GOTPC32_TLSDESC
4566 || r_type == R_X86_64_TLSDESC_CALL)
4567 relocation = htab->elf.sgotplt->output_section->vma
4568 + htab->elf.sgotplt->output_offset
4569 + offplt + htab->sgotplt_jump_table_size;
4570 else
4571 relocation = htab->elf.sgot->output_section->vma
4572 + htab->elf.sgot->output_offset + off;
4573 unresolved_reloc = FALSE;
4574 }
4575 else
4576 {
4577 bfd_vma roff = rel->r_offset;
4578
4579 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4580 {
4581 /* GD->IE transition. For 64bit, change
4582 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4583 .word 0x6666; rex64; call __tls_get_addr@plt
4584 into:
4585 movq %fs:0, %rax
4586 addq foo@gottpoff(%rip), %rax
4587 For 32bit, change
4588 leaq foo@tlsgd(%rip), %rdi
4589 .word 0x6666; rex64; call __tls_get_addr@plt
4590 into:
4591 movl %fs:0, %eax
4592 addq foo@gottpoff(%rip), %rax
4593 For largepic, change:
4594 leaq foo@tlsgd(%rip), %rdi
4595 movabsq $__tls_get_addr@pltoff, %rax
4596 addq %rbx, %rax
4597 call *%rax
4598 into:
4599 movq %fs:0, %rax
4600 addq foo@gottpoff(%rip), %rax
4601 nopw 0x0(%rax,%rax,1) */
4602 int largepic = 0;
4603 if (ABI_64_P (output_bfd)
4604 && contents[roff + 5] == (bfd_byte) '\xb8')
4605 {
4606 memcpy (contents + roff - 3,
4607 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4608 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4609 largepic = 1;
4610 }
4611 else if (ABI_64_P (output_bfd))
4612 memcpy (contents + roff - 4,
4613 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4614 16);
4615 else
4616 memcpy (contents + roff - 3,
4617 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4618 15);
4619
4620 relocation = (htab->elf.sgot->output_section->vma
4621 + htab->elf.sgot->output_offset + off
4622 - roff
4623 - largepic
4624 - input_section->output_section->vma
4625 - input_section->output_offset
4626 - 12);
4627 bfd_put_32 (output_bfd, relocation,
4628 contents + roff + 8 + largepic);
4629 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4630 rel++;
4631 continue;
4632 }
4633 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4634 {
4635 /* GDesc -> IE transition.
4636 It's originally something like:
4637 leaq x@tlsdesc(%rip), %rax
4638
4639 Change it to:
4640 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4641
4642 /* Now modify the instruction as appropriate. To
4643 turn a leaq into a movq in the form we use it, it
4644 suffices to change the second byte from 0x8d to
4645 0x8b. */
4646 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4647
4648 bfd_put_32 (output_bfd,
4649 htab->elf.sgot->output_section->vma
4650 + htab->elf.sgot->output_offset + off
4651 - rel->r_offset
4652 - input_section->output_section->vma
4653 - input_section->output_offset
4654 - 4,
4655 contents + roff);
4656 continue;
4657 }
4658 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4659 {
4660 /* GDesc -> IE transition.
4661 It's originally:
4662 call *(%rax)
4663
4664 Change it to:
4665 xchg %ax, %ax. */
4666
4667 bfd_put_8 (output_bfd, 0x66, contents + roff);
4668 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4669 continue;
4670 }
4671 else
4672 BFD_ASSERT (FALSE);
4673 }
4674 break;
4675
4676 case R_X86_64_TLSLD:
4677 if (! elf_x86_64_tls_transition (info, input_bfd,
4678 input_section, contents,
4679 symtab_hdr, sym_hashes,
4680 &r_type, GOT_UNKNOWN,
4681 rel, relend, h, r_symndx))
4682 return FALSE;
4683
4684 if (r_type != R_X86_64_TLSLD)
4685 {
4686 /* LD->LE transition:
4687 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4688 For 64bit, we change it into:
4689 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4690 For 32bit, we change it into:
4691 nopl 0x0(%rax); movl %fs:0, %eax.
4692 For largepic, change:
4693 leaq foo@tlsgd(%rip), %rdi
4694 movabsq $__tls_get_addr@pltoff, %rax
4695 addq %rbx, %rax
4696 call *%rax
4697 into:
4698 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4699 movq %fs:0, %rax */
4700
4701 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4702 if (ABI_64_P (output_bfd)
4703 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4704 memcpy (contents + rel->r_offset - 3,
4705 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4706 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4707 else if (ABI_64_P (output_bfd))
4708 memcpy (contents + rel->r_offset - 3,
4709 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4710 else
4711 memcpy (contents + rel->r_offset - 3,
4712 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4713 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4714 rel++;
4715 continue;
4716 }
4717
4718 if (htab->elf.sgot == NULL)
4719 abort ();
4720
4721 off = htab->tls_ld_got.offset;
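/* As with other GOT entries, the least significant bit records
   whether this entry has already been initialized. */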
4722 if (off & 1)
4723 off &= ~1;
4724 else
4725 {
4726 Elf_Internal_Rela outrel;
4727
4728 if (htab->elf.srelgot == NULL)
4729 abort ();
4730
4731 outrel.r_offset = (htab->elf.sgot->output_section->vma
4732 + htab->elf.sgot->output_offset + off);
4733
4734 bfd_put_64 (output_bfd, 0,
4735 htab->elf.sgot->contents + off);
4736 bfd_put_64 (output_bfd, 0,
4737 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4738 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4739 outrel.r_addend = 0;
4740 elf_append_rela (output_bfd, htab->elf.srelgot,
4741 &outrel);
4742 htab->tls_ld_got.offset |= 1;
4743 }
4744 relocation = htab->elf.sgot->output_section->vma
4745 + htab->elf.sgot->output_offset + off;
4746 unresolved_reloc = FALSE;
4747 break;
4748
4749 case R_X86_64_DTPOFF32:
4750 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4751 relocation -= elf_x86_64_dtpoff_base (info);
4752 else
4753 relocation = elf_x86_64_tpoff (info, relocation);
4754 break;
4755
4756 case R_X86_64_TPOFF32:
4757 case R_X86_64_TPOFF64:
4758 BFD_ASSERT (info->executable);
4759 relocation = elf_x86_64_tpoff (info, relocation);
4760 break;
4761
4762 case R_X86_64_DTPOFF64:
4763 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4764 relocation -= elf_x86_64_dtpoff_base (info);
4765 break;
4766
4767 default:
4768 break;
4769 }
4770
4771 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4772 because such sections are not SEC_ALLOC and thus ld.so will
4773 not process them. */
4774 if (unresolved_reloc
4775 && !((input_section->flags & SEC_DEBUGGING) != 0
4776 && h->def_dynamic)
4777 && _bfd_elf_section_offset (output_bfd, info, input_section,
4778 rel->r_offset) != (bfd_vma) -1)
4779 {
4780 (*_bfd_error_handler)
4781 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4782 input_bfd,
4783 input_section,
4784 (long) rel->r_offset,
4785 howto->name,
4786 h->root.root.string);
4787 return FALSE;
4788 }
4789
4790 do_relocation:
4791 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4792 contents, rel->r_offset,
4793 relocation, rel->r_addend);
4794
4795 check_relocation_error:
4796 if (r != bfd_reloc_ok)
4797 {
4798 const char *name;
4799
4800 if (h != NULL)
4801 name = h->root.root.string;
4802 else
4803 {
4804 name = bfd_elf_string_from_elf_section (input_bfd,
4805 symtab_hdr->sh_link,
4806 sym->st_name);
4807 if (name == NULL)
4808 return FALSE;
4809 if (*name == '\0')
4810 name = bfd_section_name (input_bfd, sec);
4811 }
4812
4813 if (r == bfd_reloc_overflow)
4814 {
4815 if (! ((*info->callbacks->reloc_overflow)
4816 (info, (h ? &h->root : NULL), name, howto->name,
4817 (bfd_vma) 0, input_bfd, input_section,
4818 rel->r_offset)))
4819 return FALSE;
4820 }
4821 else
4822 {
4823 (*_bfd_error_handler)
4824 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4825 input_bfd, input_section,
4826 (long) rel->r_offset, name, (int) r);
4827 return FALSE;
4828 }
4829 }
4830 }
4831
4832 return TRUE;
4833 }
4834
4835 /* Finish up dynamic symbol handling. We set the contents of various
4836 dynamic sections here. */
4837
4838 static bfd_boolean
4839 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4840 struct bfd_link_info *info,
4841 struct elf_link_hash_entry *h,
4842 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4843 {
4844 struct elf_x86_64_link_hash_table *htab;
4845 const struct elf_x86_64_backend_data *abed;
4846 bfd_boolean use_plt_bnd;
4847 struct elf_x86_64_link_hash_entry *eh;
4848
4849 htab = elf_x86_64_hash_table (info);
4850 if (htab == NULL)
4851 return FALSE;
4852
4853 /* Use the MPX backend data in case of BND relocations. Use the .plt.bnd
4854 section only if there is a .plt section. */
4855 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4856 abed = (use_plt_bnd
4857 ? &elf_x86_64_bnd_arch_bed
4858 : get_elf_x86_64_backend_data (output_bfd));
4859
4860 eh = (struct elf_x86_64_link_hash_entry *) h;
4861
4862 if (h->plt.offset != (bfd_vma) -1)
4863 {
4864 bfd_vma plt_index;
4865 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4866 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4867 Elf_Internal_Rela rela;
4868 bfd_byte *loc;
4869 asection *plt, *gotplt, *relplt, *resolved_plt;
4870 const struct elf_backend_data *bed;
4871 bfd_vma plt_got_pcrel_offset;
4872
4873 /* When building a static executable, use .iplt, .igot.plt and
4874 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4875 if (htab->elf.splt != NULL)
4876 {
4877 plt = htab->elf.splt;
4878 gotplt = htab->elf.sgotplt;
4879 relplt = htab->elf.srelplt;
4880 }
4881 else
4882 {
4883 plt = htab->elf.iplt;
4884 gotplt = htab->elf.igotplt;
4885 relplt = htab->elf.irelplt;
4886 }
4887
4888 /* This symbol has an entry in the procedure linkage table. Set
4889 it up. */
4890 if ((h->dynindx == -1
4891 && !((h->forced_local || info->executable)
4892 && h->def_regular
4893 && h->type == STT_GNU_IFUNC))
4894 || plt == NULL
4895 || gotplt == NULL
4896 || relplt == NULL)
4897 abort ();
4898
4899 /* Get the index in the procedure linkage table which
4900 corresponds to this symbol. This is the index of this symbol
4901 in all the symbols for which we are making plt entries. The
4902 first entry in the procedure linkage table is reserved.
4903
4904 Get the offset into the .got table of the entry that
4905 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4906 bytes. The first three are reserved for the dynamic linker.
4907
4908 For static executables, we don't reserve anything. */
4909
4910 if (plt == htab->elf.splt)
4911 {
4912 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4913 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4914 }
4915 else
4916 {
4917 got_offset = h->plt.offset / abed->plt_entry_size;
4918 got_offset = got_offset * GOT_ENTRY_SIZE;
4919 }
4920
4921 plt_plt_insn_end = abed->plt_plt_insn_end;
4922 plt_plt_offset = abed->plt_plt_offset;
4923 plt_got_insn_size = abed->plt_got_insn_size;
4924 plt_got_offset = abed->plt_got_offset;
4925 if (use_plt_bnd)
4926 {
4927 /* Use the second PLT with BND relocations. */
4928 const bfd_byte *plt_entry, *plt2_entry;
4929
4930 if (eh->has_bnd_reloc)
4931 {
4932 plt_entry = elf_x86_64_bnd_plt_entry;
4933 plt2_entry = elf_x86_64_bnd_plt2_entry;
4934 }
4935 else
4936 {
4937 plt_entry = elf_x86_64_legacy_plt_entry;
4938 plt2_entry = elf_x86_64_legacy_plt2_entry;
4939
4940 /* Subtract 1 since there is no BND prefix. */
4941 plt_plt_insn_end -= 1;
4942 plt_plt_offset -= 1;
4943 plt_got_insn_size -= 1;
4944 plt_got_offset -= 1;
4945 }
4946
4947 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4948 == sizeof (elf_x86_64_legacy_plt_entry));
4949
4950 /* Fill in the entry in the procedure linkage table. */
4951 memcpy (plt->contents + h->plt.offset,
4952 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4953 /* Fill in the entry in the second PLT. */
4954 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4955 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4956
4957 resolved_plt = htab->plt_bnd;
4958 plt_offset = eh->plt_bnd.offset;
4959 }
4960 else
4961 {
4962 /* Fill in the entry in the procedure linkage table. */
4963 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4964 abed->plt_entry_size);
4965
4966 resolved_plt = plt;
4967 plt_offset = h->plt.offset;
4968 }
4969
4970 /* Fill in the relocated fields of this PLT entry. */
4971
4972 /* Put in the offset of the GOT entry relative to the end of the
4973 PC-relative instruction in the PLT entry that refers to it. */
4974 plt_got_pcrel_offset = (gotplt->output_section->vma
4975 + gotplt->output_offset
4976 + got_offset
4977 - resolved_plt->output_section->vma
4978 - resolved_plt->output_offset
4979 - plt_offset
4980 - plt_got_insn_size);
4981
4982 /* Check PC-relative offset overflow in PLT entry. */
4983 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4984 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
4985 output_bfd, h->root.root.string);
4986
4987 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4988 resolved_plt->contents + plt_offset + plt_got_offset);
4989
4990 /* Fill in the entry in the global offset table; initially this
4991 points to the second part of the PLT entry. */
4992 bfd_put_64 (output_bfd, (plt->output_section->vma
4993 + plt->output_offset
4994 + h->plt.offset + abed->plt_lazy_offset),
4995 gotplt->contents + got_offset);
4996
4997 /* Fill in the entry in the .rela.plt section. */
4998 rela.r_offset = (gotplt->output_section->vma
4999 + gotplt->output_offset
5000 + got_offset);
5001 if (h->dynindx == -1
5002 || ((info->executable
5003 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5004 && h->def_regular
5005 && h->type == STT_GNU_IFUNC))
5006 {
5007 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5008 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5009 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5010 rela.r_addend = (h->root.u.def.value
5011 + h->root.u.def.section->output_section->vma
5012 + h->root.u.def.section->output_offset);
5013 /* R_X86_64_IRELATIVE comes last. */
5014 plt_index = htab->next_irelative_index--;
5015 }
5016 else
5017 {
5018 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5019 rela.r_addend = 0;
5020 plt_index = htab->next_jump_slot_index++;
5021 }
5022
5023 /* Don't fill PLT entry for static executables. */
5024 if (plt == htab->elf.splt)
5025 {
5026 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5027
5028 /* Put relocation index. */
5029 bfd_put_32 (output_bfd, plt_index,
5030 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5031
5032 /* Put offset for jmp .PLT0 and check for overflow. We don't
5033 check relocation index for overflow since branch displacement
5034 will overflow first. */
5035 if (plt0_offset > 0x80000000)
5036 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5037 output_bfd, h->root.root.string);
5038 bfd_put_32 (output_bfd, - plt0_offset,
5039 plt->contents + h->plt.offset + plt_plt_offset);
5040 }
5041
5042 bed = get_elf_backend_data (output_bfd);
5043 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5044 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5045 }
5046 else if (eh->plt_got.offset != (bfd_vma) -1)
5047 {
5048 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5049 asection *plt, *got;
5050 bfd_boolean got_after_plt;
5051 int32_t got_pcrel_offset;
5052 const bfd_byte *got_plt_entry;
5053
5054 /* Set the entry in the GOT procedure linkage table. */
5055 plt = htab->plt_got;
5056 got = htab->elf.sgot;
5057 got_offset = h->got.offset;
5058
5059 if (got_offset == (bfd_vma) -1
5060 || h->type == STT_GNU_IFUNC
5061 || plt == NULL
5062 || got == NULL)
5063 abort ();
5064
5065 /* Use the second PLT entry template for the GOT PLT since they
5066 are identical. */
5067 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5068 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5069 if (eh->has_bnd_reloc)
5070 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5071 else
5072 {
5073 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5074
5075 /* Subtract 1 since there is no BND prefix. */
5076 plt_got_insn_size -= 1;
5077 plt_got_offset -= 1;
5078 }
5079
5080 /* Fill in the entry in the GOT procedure linkage table. */
5081 plt_offset = eh->plt_got.offset;
5082 memcpy (plt->contents + plt_offset,
5083 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5084
5085 /* Put in the offset of the GOT entry relative to the end of the
5086 PC-relative instruction that refers to it. */
5087 got_pcrel_offset = (got->output_section->vma
5088 + got->output_offset
5089 + got_offset
5090 - plt->output_section->vma
5091 - plt->output_offset
5092 - plt_offset
5093 - plt_got_insn_size);
5094
5095 /* Check PC-relative offset overflow in GOT PLT entry. */
5096 got_after_plt = got->output_section->vma > plt->output_section->vma;
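/* got_pcrel_offset is only 32 bits wide. If the GOT is laid out
   above the PLT the true offset must be positive, and negative
   otherwise, so a wrong sign indicates that the value overflowed. */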
5097 if ((got_after_plt && got_pcrel_offset < 0)
5098 || (!got_after_plt && got_pcrel_offset > 0))
5099 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5100 output_bfd, h->root.root.string);
5101
5102 bfd_put_32 (output_bfd, got_pcrel_offset,
5103 plt->contents + plt_offset + plt_got_offset);
5104 }
5105
5106 if (!h->def_regular
5107 && (h->plt.offset != (bfd_vma) -1
5108 || eh->plt_got.offset != (bfd_vma) -1))
5109 {
5110 /* Mark the symbol as undefined, rather than as defined in
5111 the .plt section. Leave the value if there were any
5112 relocations where pointer equality matters (this is a clue
5113 for the dynamic linker, to make function pointer
5114 comparisons work between an application and shared
5115 library), otherwise set it to zero. If a function is only
5116 called from a binary, there is no need to slow down
5117 shared libraries because of that. */
5118 sym->st_shndx = SHN_UNDEF;
5119 if (!h->pointer_equality_needed)
5120 sym->st_value = 0;
5121 }
5122
5123 if (h->got.offset != (bfd_vma) -1
5124 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5125 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5126 {
5127 Elf_Internal_Rela rela;
5128
5129 /* This symbol has an entry in the global offset table. Set it
5130 up. */
5131 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5132 abort ();
5133
5134 rela.r_offset = (htab->elf.sgot->output_section->vma
5135 + htab->elf.sgot->output_offset
5136 + (h->got.offset &~ (bfd_vma) 1));
5137
5138 /* If this is a static link, or it is a -Bsymbolic link and the
5139 symbol is defined locally or was forced to be local because
5140 of a version file, we just want to emit a RELATIVE reloc.
5141 The entry in the global offset table will already have been
5142 initialized in the relocate_section function. */
5143 if (h->def_regular
5144 && h->type == STT_GNU_IFUNC)
5145 {
5146 if (info->shared)
5147 {
5148 /* Generate R_X86_64_GLOB_DAT. */
5149 goto do_glob_dat;
5150 }
5151 else
5152 {
5153 asection *plt;
5154
5155 if (!h->pointer_equality_needed)
5156 abort ();
5157
5158 /* For a non-shared object, we can't use .got.plt, which
5159 contains the real function address, if we need pointer
5160 equality. We load the GOT entry with the PLT entry address instead. */
5161 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5162 bfd_put_64 (output_bfd, (plt->output_section->vma
5163 + plt->output_offset
5164 + h->plt.offset),
5165 htab->elf.sgot->contents + h->got.offset);
5166 return TRUE;
5167 }
5168 }
5169 else if (info->shared
5170 && SYMBOL_REFERENCES_LOCAL (info, h))
5171 {
5172 if (!h->def_regular)
5173 return FALSE;
5174 BFD_ASSERT((h->got.offset & 1) != 0);
5175 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5176 rela.r_addend = (h->root.u.def.value
5177 + h->root.u.def.section->output_section->vma
5178 + h->root.u.def.section->output_offset);
5179 }
5180 else
5181 {
5182 BFD_ASSERT((h->got.offset & 1) == 0);
5183 do_glob_dat:
5184 bfd_put_64 (output_bfd, (bfd_vma) 0,
5185 htab->elf.sgot->contents + h->got.offset);
5186 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5187 rela.r_addend = 0;
5188 }
5189
5190 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5191 }
5192
5193 if (h->needs_copy)
5194 {
5195 Elf_Internal_Rela rela;
5196
5197 /* This symbol needs a copy reloc. Set it up. */
5198
5199 if (h->dynindx == -1
5200 || (h->root.type != bfd_link_hash_defined
5201 && h->root.type != bfd_link_hash_defweak)
5202 || htab->srelbss == NULL)
5203 abort ();
5204
5205 rela.r_offset = (h->root.u.def.value
5206 + h->root.u.def.section->output_section->vma
5207 + h->root.u.def.section->output_offset);
5208 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5209 rela.r_addend = 0;
5210 elf_append_rela (output_bfd, htab->srelbss, &rela);
5211 }
5212
5213 return TRUE;
5214 }
5215
5216 /* Finish up local dynamic symbol handling. We set the contents of
5217 various dynamic sections here. */
5218
5219 static bfd_boolean
5220 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5221 {
5222 struct elf_link_hash_entry *h
5223 = (struct elf_link_hash_entry *) *slot;
5224 struct bfd_link_info *info
5225 = (struct bfd_link_info *) inf;
5226
5227 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5228 info, h, NULL);
5229 }
5230
5231 /* Used to decide how to sort relocs in an optimal manner for the
5232 dynamic linker, before writing them out. */
5233
5234 static enum elf_reloc_type_class
5235 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5236 const asection *rel_sec ATTRIBUTE_UNUSED,
5237 const Elf_Internal_Rela *rela)
5238 {
5239 switch ((int) ELF32_R_TYPE (rela->r_info))
5240 {
5241 case R_X86_64_RELATIVE:
5242 case R_X86_64_RELATIVE64:
5243 return reloc_class_relative;
5244 case R_X86_64_JUMP_SLOT:
5245 return reloc_class_plt;
5246 case R_X86_64_COPY:
5247 return reloc_class_copy;
5248 default:
5249 return reloc_class_normal;
5250 }
5251 }
5252
5253 /* Finish up the dynamic sections. */
5254
5255 static bfd_boolean
5256 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5257 struct bfd_link_info *info)
5258 {
5259 struct elf_x86_64_link_hash_table *htab;
5260 bfd *dynobj;
5261 asection *sdyn;
5262 const struct elf_x86_64_backend_data *abed;
5263
5264 htab = elf_x86_64_hash_table (info);
5265 if (htab == NULL)
5266 return FALSE;
5267
5268 /* Use the MPX backend data in case of BND relocations. Use the .plt.bnd
5269 section only if there is a .plt section. */
5270 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5271 ? &elf_x86_64_bnd_arch_bed
5272 : get_elf_x86_64_backend_data (output_bfd));
5273
5274 dynobj = htab->elf.dynobj;
5275 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5276
5277 if (htab->elf.dynamic_sections_created)
5278 {
5279 bfd_byte *dyncon, *dynconend;
5280 const struct elf_backend_data *bed;
5281 bfd_size_type sizeof_dyn;
5282
5283 if (sdyn == NULL || htab->elf.sgot == NULL)
5284 abort ();
5285
5286 bed = get_elf_backend_data (dynobj);
5287 sizeof_dyn = bed->s->sizeof_dyn;
5288 dyncon = sdyn->contents;
5289 dynconend = sdyn->contents + sdyn->size;
5290 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5291 {
5292 Elf_Internal_Dyn dyn;
5293 asection *s;
5294
5295 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5296
5297 switch (dyn.d_tag)
5298 {
5299 default:
5300 continue;
5301
5302 case DT_PLTGOT:
5303 s = htab->elf.sgotplt;
5304 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5305 break;
5306
5307 case DT_JMPREL:
5308 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5309 break;
5310
5311 case DT_PLTRELSZ:
5312 s = htab->elf.srelplt->output_section;
5313 dyn.d_un.d_val = s->size;
5314 break;
5315
5316 case DT_RELASZ:
5317 /* The procedure linkage table relocs (DT_JMPREL) should
5318 not be included in the overall relocs (DT_RELA).
5319 Therefore, we override the DT_RELASZ entry here to
5320 make it not include the JMPREL relocs. Since the
5321 linker script arranges for .rela.plt to follow all
5322 other relocation sections, we don't have to worry
5323 about changing the DT_RELA entry. */
5324 if (htab->elf.srelplt != NULL)
5325 {
5326 s = htab->elf.srelplt->output_section;
5327 dyn.d_un.d_val -= s->size;
5328 }
5329 break;
5330
5331 case DT_TLSDESC_PLT:
5332 s = htab->elf.splt;
5333 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5334 + htab->tlsdesc_plt;
5335 break;
5336
5337 case DT_TLSDESC_GOT:
5338 s = htab->elf.sgot;
5339 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5340 + htab->tlsdesc_got;
5341 break;
5342 }
5343
5344 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5345 }
5346
5347 /* Fill in the special first entry in the procedure linkage table. */
5348 if (htab->elf.splt && htab->elf.splt->size > 0)
5349 {
5350 /* Fill in the first entry in the procedure linkage table. */
5351 memcpy (htab->elf.splt->contents,
5352 abed->plt0_entry, abed->plt_entry_size);
5353 /* Add the offset for pushq GOT+8(%rip); since the instruction
5354 is 6 bytes long, subtract 6 from the value. */
5355 bfd_put_32 (output_bfd,
5356 (htab->elf.sgotplt->output_section->vma
5357 + htab->elf.sgotplt->output_offset
5358 + 8
5359 - htab->elf.splt->output_section->vma
5360 - htab->elf.splt->output_offset
5361 - 6),
5362 htab->elf.splt->contents + abed->plt0_got1_offset);
5363 /* Add offset for the PC-relative instruction accessing GOT+16,
5364 subtracting the offset to the end of that instruction. */
5365 bfd_put_32 (output_bfd,
5366 (htab->elf.sgotplt->output_section->vma
5367 + htab->elf.sgotplt->output_offset
5368 + 16
5369 - htab->elf.splt->output_section->vma
5370 - htab->elf.splt->output_offset
5371 - abed->plt0_got2_insn_end),
5372 htab->elf.splt->contents + abed->plt0_got2_offset);
5373
5374 elf_section_data (htab->elf.splt->output_section)
5375 ->this_hdr.sh_entsize = abed->plt_entry_size;
5376
5377 if (htab->tlsdesc_plt)
5378 {
5379 bfd_put_64 (output_bfd, (bfd_vma) 0,
5380 htab->elf.sgot->contents + htab->tlsdesc_got);
5381
5382 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5383 abed->plt0_entry, abed->plt_entry_size);
5384
5385 /* Add the offset for pushq GOT+8(%rip); since the
5386 instruction is 6 bytes long, subtract 6 from the value. */
5387 bfd_put_32 (output_bfd,
5388 (htab->elf.sgotplt->output_section->vma
5389 + htab->elf.sgotplt->output_offset
5390 + 8
5391 - htab->elf.splt->output_section->vma
5392 - htab->elf.splt->output_offset
5393 - htab->tlsdesc_plt
5394 - 6),
5395 htab->elf.splt->contents
5396 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5397 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5398 where TDG stands for htab->tlsdesc_got, subtracting the offset
5399 to the end of that instruction. */
5400 bfd_put_32 (output_bfd,
5401 (htab->elf.sgot->output_section->vma
5402 + htab->elf.sgot->output_offset
5403 + htab->tlsdesc_got
5404 - htab->elf.splt->output_section->vma
5405 - htab->elf.splt->output_offset
5406 - htab->tlsdesc_plt
5407 - abed->plt0_got2_insn_end),
5408 htab->elf.splt->contents
5409 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5410 }
5411 }
5412 }
5413
5414 if (htab->plt_bnd != NULL)
5415 elf_section_data (htab->plt_bnd->output_section)
5416 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5417
5418 if (htab->elf.sgotplt)
5419 {
5420 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5421 {
5422 (*_bfd_error_handler)
5423 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5424 return FALSE;
5425 }
5426
5427 /* Fill in the first three entries in the global offset table. */
5428 if (htab->elf.sgotplt->size > 0)
5429 {
5430 /* Set the first entry in the global offset table to the address of
5431 the dynamic section. */
5432 if (sdyn == NULL)
5433 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5434 else
5435 bfd_put_64 (output_bfd,
5436 sdyn->output_section->vma + sdyn->output_offset,
5437 htab->elf.sgotplt->contents);
5438 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5439 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5440 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5441 }
5442
5443 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5444 GOT_ENTRY_SIZE;
5445 }
5446
5447 /* Adjust .eh_frame for .plt section. */
5448 if (htab->plt_eh_frame != NULL
5449 && htab->plt_eh_frame->contents != NULL)
5450 {
5451 if (htab->elf.splt != NULL
5452 && htab->elf.splt->size != 0
5453 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5454 && htab->elf.splt->output_section != NULL
5455 && htab->plt_eh_frame->output_section != NULL)
5456 {
5457 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5458 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5459 + htab->plt_eh_frame->output_offset
5460 + PLT_FDE_START_OFFSET;
5461 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5462 htab->plt_eh_frame->contents
5463 + PLT_FDE_START_OFFSET);
5464 }
5465 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5466 {
5467 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5468 htab->plt_eh_frame,
5469 htab->plt_eh_frame->contents))
5470 return FALSE;
5471 }
5472 }
5473
5474 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5475 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5476 = GOT_ENTRY_SIZE;
5477
5478 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5479 htab_traverse (htab->loc_hash_table,
5480 elf_x86_64_finish_local_dynamic_symbol,
5481 info);
5482
5483 return TRUE;
5484 }
5485
5486 /* Return the address in section PLT for the Ith GOTPLT relocation
5487 REL, or (bfd_vma) -1 if it should not be included. */
5488
5489 static bfd_vma
5490 elf_x86_64_plt_sym_val (bfd_vma i, const asection *plt,
5491 const arelent *rel)
5492 {
5493 bfd *abfd;
5494 const struct elf_x86_64_backend_data *bed;
5495 bfd_vma plt_offset;
5496
5497 /* Only match R_X86_64_JUMP_SLOT and R_X86_64_IRELATIVE. */
5498 if (rel->howto->type != R_X86_64_JUMP_SLOT
5499 && rel->howto->type != R_X86_64_IRELATIVE)
5500 return (bfd_vma) -1;
5501
5502 abfd = plt->owner;
5503 bed = get_elf_x86_64_backend_data (abfd);
5504 plt_offset = bed->plt_entry_size;
5505
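/* Objects without the GNU OSABI cannot contain IFUNC (IRELATIVE)
   entries, so the Ith relocation simply maps to the (I + 1)th PLT
   entry, past the reserved first entry. Otherwise scan the PLT
   entries for the matching relocation index. */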
5506 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5507 return plt->vma + (i + 1) * plt_offset;
5508
5509 while (plt_offset < plt->size)
5510 {
5511 bfd_vma reloc_index;
5512 bfd_byte reloc_index_raw[4];
5513
5514 if (!bfd_get_section_contents (abfd, (asection *) plt,
5515 reloc_index_raw,
5516 plt_offset + bed->plt_reloc_offset,
5517 sizeof (reloc_index_raw)))
5518 return (bfd_vma) -1;
5519
5520 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5521 if (reloc_index == i)
5522 return plt->vma + plt_offset;
5523 plt_offset += bed->plt_entry_size;
5524 }
5525
5526 abort ();
5527 }
5528
5529 /* Return the offset in the .plt.bnd section for the Ith GOTPLT relocation
5530 against PLT section PLT, or (bfd_vma) -1 if it should not be included. */
5531
5532 static bfd_vma
5533 elf_x86_64_plt_sym_val_offset_plt_bnd (bfd_vma i, const asection *plt)
5534 {
5535 const struct elf_x86_64_backend_data *bed = &elf_x86_64_bnd_arch_bed;
5536 bfd *abfd = plt->owner;
5537 bfd_vma plt_offset = bed->plt_entry_size;
5538
5539 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5540 return i * sizeof (elf_x86_64_legacy_plt2_entry);
5541
5542 while (plt_offset < plt->size)
5543 {
5544 bfd_vma reloc_index;
5545 bfd_byte reloc_index_raw[4];
5546
5547 if (!bfd_get_section_contents (abfd, (asection *) plt,
5548 reloc_index_raw,
5549 plt_offset + bed->plt_reloc_offset,
5550 sizeof (reloc_index_raw)))
5551 return (bfd_vma) -1;
5552
5553 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5554 if (reloc_index == i)
5555 {
5556 /* This is the index in the .plt section. */
5557 long plt_index = plt_offset / bed->plt_entry_size;
5558 /* Return the offset in .plt.bnd section. */
5559 return (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry);
5560 }
5561 plt_offset += bed->plt_entry_size;
5562 }
5563
5564 abort ();
5565 }
5566
5567 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5568 support. */
5569
5570 static long
5571 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5572 long symcount,
5573 asymbol **syms,
5574 long dynsymcount,
5575 asymbol **dynsyms,
5576 asymbol **ret)
5577 {
5578 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5579 asection *relplt;
5580 asymbol *s;
5581 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5582 arelent *p;
5583 long count, i, n;
5584 size_t size;
5585 Elf_Internal_Shdr *hdr;
5586 char *names;
5587 asection *plt, *plt_push;
5588
5589 plt_push = bfd_get_section_by_name (abfd, ".plt");
5590 if (plt_push == NULL)
5591 return 0;
5592
5593 plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5594 /* Use the generic ELF version if there is no .plt.bnd section. */
5595 if (plt == NULL)
5596 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
5597 dynsymcount, dynsyms, ret);
5598
5599 *ret = NULL;
5600
5601 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
5602 return 0;
5603
5604 if (dynsymcount <= 0)
5605 return 0;
5606
5607 relplt = bfd_get_section_by_name (abfd, ".rela.plt");
5608 if (relplt == NULL)
5609 return 0;
5610
5611 hdr = &elf_section_data (relplt)->this_hdr;
5612 if (hdr->sh_link != elf_dynsymtab (abfd)
5613 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
5614 return 0;
5615
5616 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5617 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5618 return -1;
5619
5620 count = relplt->size / hdr->sh_entsize;
5621 size = count * sizeof (asymbol);
5622 p = relplt->relocation;
5623 for (i = 0; i < count; i++, p += bed->s->int_rels_per_ext_rel)
5624 {
5625 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
5626 if (p->addend != 0)
5627 size += sizeof ("+0x") - 1 + 8 + 8;
5628 }
5629
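  /* The buffer holds the COUNT asymbols followed by their synthetic
     "name@plt" strings; the loop above also reserved room for any
     "+0x<addend>" suffix.  */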
5630 s = *ret = (asymbol *) bfd_malloc (size);
5631 if (s == NULL)
5632 return -1;
5633
5634 names = (char *) (s + count);
5635 p = relplt->relocation;
5636 n = 0;
5637 for (i = 0; i < count; i++, p++)
5638 {
5639 bfd_vma offset;
5640 size_t len;
5641
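      /* Only R_X86_64_JUMP_SLOT and R_X86_64_IRELATIVE relocations
         correspond to PLT entries; skip everything else.  */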
5642 if (p->howto->type != R_X86_64_JUMP_SLOT
5643 && p->howto->type != R_X86_64_IRELATIVE)
5644 continue;
5645
5646 offset = elf_x86_64_plt_sym_val_offset_plt_bnd (i, plt_push);
5647
5648 *s = **p->sym_ptr_ptr;
5649 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
5650 we are defining a symbol, ensure one of them is set. */
5651 if ((s->flags & BSF_LOCAL) == 0)
5652 s->flags |= BSF_GLOBAL;
5653 s->flags |= BSF_SYNTHETIC;
5654 s->section = plt;
5655 s->value = offset;
5656 s->name = names;
5657 s->udata.p = NULL;
5658 len = strlen ((*p->sym_ptr_ptr)->name);
5659 memcpy (names, (*p->sym_ptr_ptr)->name, len);
5660 names += len;
5661 if (p->addend != 0)
5662 {
5663 char buf[30], *a;
5664
5665 memcpy (names, "+0x", sizeof ("+0x") - 1);
5666 names += sizeof ("+0x") - 1;
5667 bfd_sprintf_vma (abfd, buf, p->addend);
5668 for (a = buf; *a == '0'; ++a)
5669 ;
5670 len = strlen (a);
5671 memcpy (names, a, len);
5672 names += len;
5673 }
5674 memcpy (names, "@plt", sizeof ("@plt"));
5675 names += sizeof ("@plt");
5676 ++s, ++n;
5677 }
5678
5679 return n;
5680 }
5681
5682 /* Handle an x86-64 specific section when reading an object file. This
5683 is called when elfcode.h finds a section with an unknown type. */
5684
5685 static bfd_boolean
5686 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5687 const char *name, int shindex)
5688 {
5689 if (hdr->sh_type != SHT_X86_64_UNWIND)
5690 return FALSE;
5691
5692 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5693 return FALSE;
5694
5695 return TRUE;
5696 }
5697
5698 /* Hook called by the linker routine which adds symbols from an object
5699 file. We use it to put SHN_X86_64_LCOMMON items in .lbss instead
5700 of .bss. */
5701
5702 static bfd_boolean
5703 elf_x86_64_add_symbol_hook (bfd *abfd,
5704 struct bfd_link_info *info,
5705 Elf_Internal_Sym *sym,
5706 const char **namep ATTRIBUTE_UNUSED,
5707 flagword *flagsp ATTRIBUTE_UNUSED,
5708 asection **secp,
5709 bfd_vma *valp)
5710 {
5711 asection *lcomm;
5712
5713 switch (sym->st_shndx)
5714 {
5715 case SHN_X86_64_LCOMMON:
5716 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5717 if (lcomm == NULL)
5718 {
5719 lcomm = bfd_make_section_with_flags (abfd,
5720 "LARGE_COMMON",
5721 (SEC_ALLOC
5722 | SEC_IS_COMMON
5723 | SEC_LINKER_CREATED));
5724 if (lcomm == NULL)
5725 return FALSE;
5726 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5727 }
5728 *secp = lcomm;
5729 *valp = sym->st_size;
5730 return TRUE;
5731 }
5732
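  /* Note that this object uses GNU symbol extensions (STT_GNU_IFUNC or
     STB_GNU_UNIQUE) so that the output file can later be marked with
     the ELFOSABI_GNU OS/ABI.  */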
5733 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5734 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5735 && (abfd->flags & DYNAMIC) == 0
5736 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5737 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5738
5739 return TRUE;
5740 }
5741
5742
5743 /* Given a BFD section, try to locate the corresponding ELF section
5744 index. */
5745
5746 static bfd_boolean
5747 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5748 asection *sec, int *index_return)
5749 {
5750 if (sec == &_bfd_elf_large_com_section)
5751 {
5752 *index_return = SHN_X86_64_LCOMMON;
5753 return TRUE;
5754 }
5755 return FALSE;
5756 }
5757
5758 /* Process a symbol. */
5759
5760 static void
5761 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5762 asymbol *asym)
5763 {
5764 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5765
5766 switch (elfsym->internal_elf_sym.st_shndx)
5767 {
5768 case SHN_X86_64_LCOMMON:
5769 asym->section = &_bfd_elf_large_com_section;
5770 asym->value = elfsym->internal_elf_sym.st_size;
5771 /* Common symbol doesn't set BSF_GLOBAL. */
5772 asym->flags &= ~BSF_GLOBAL;
5773 break;
5774 }
5775 }
5776
5777 static bfd_boolean
5778 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5779 {
5780 return (sym->st_shndx == SHN_COMMON
5781 || sym->st_shndx == SHN_X86_64_LCOMMON);
5782 }
5783
5784 static unsigned int
5785 elf_x86_64_common_section_index (asection *sec)
5786 {
5787 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5788 return SHN_COMMON;
5789 else
5790 return SHN_X86_64_LCOMMON;
5791 }
5792
5793 static asection *
5794 elf_x86_64_common_section (asection *sec)
5795 {
5796 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5797 return bfd_com_section_ptr;
5798 else
5799 return &_bfd_elf_large_com_section;
5800 }
5801
5802 static bfd_boolean
5803 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5804 const Elf_Internal_Sym *sym,
5805 asection **psec,
5806 bfd_boolean newdef,
5807 bfd_boolean olddef,
5808 bfd *oldbfd,
5809 const asection *oldsec)
5810 {
5811 /* A normal common symbol and a large common symbol result in a
5812 normal common symbol. We turn the large common symbol into a
5813 normal one. */
5814 if (!olddef
5815 && h->root.type == bfd_link_hash_common
5816 && !newdef
5817 && bfd_is_com_section (*psec)
5818 && oldsec != *psec)
5819 {
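      /* If the new symbol is a normal common but the old one lives in a
         large common section, move the old symbol into an ordinary
         COMMON section.  Conversely, if the new symbol is the large
         common, fold it into the existing normal common section.  */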
5820 if (sym->st_shndx == SHN_COMMON
5821 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5822 {
5823 h->root.u.c.p->section
5824 = bfd_make_section_old_way (oldbfd, "COMMON");
5825 h->root.u.c.p->section->flags = SEC_ALLOC;
5826 }
5827 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5828 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5829 *psec = bfd_com_section_ptr;
5830 }
5831
5832 return TRUE;
5833 }
5834
5835 static int
5836 elf_x86_64_additional_program_headers (bfd *abfd,
5837 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5838 {
5839 asection *s;
5840 int count = 0;
5841
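  /* The large-model sections .lrodata and .ldata are mapped by their
     own segments, so reserve one additional program header for each of
     them that is actually loaded.  */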
5842 /* Check to see if we need a large readonly segment. */
5843 s = bfd_get_section_by_name (abfd, ".lrodata");
5844 if (s && (s->flags & SEC_LOAD))
5845 count++;
5846
5847 /* Check to see if we need a large data segment. Since the .lbss
5848 section is placed right after the .bss section, there should be no
5849 need for a large data segment just because of .lbss. */
5850 s = bfd_get_section_by_name (abfd, ".ldata");
5851 if (s && (s->flags & SEC_LOAD))
5852 count++;
5853
5854 return count;
5855 }
5856
5857 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5858
5859 static bfd_boolean
5860 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5861 {
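  /* A symbol that has a PLT entry, is not defined in a regular object
     and never has its address taken is only ever reached through the
     PLT, so it does not need an entry in the .gnu.hash section.  */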
5862 if (h->plt.offset != (bfd_vma) -1
5863 && !h->def_regular
5864 && !h->pointer_equality_needed)
5865 return FALSE;
5866
5867 return _bfd_elf_hash_symbol (h);
5868 }
5869
5870 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5871
5872 static bfd_boolean
5873 elf_x86_64_relocs_compatible (const bfd_target *input,
5874 const bfd_target *output)
5875 {
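  /* x32 (ELFCLASS32) and 64-bit (ELFCLASS64) x86-64 objects share
     relocation numbers but are not link-compatible, so also require
     the ELF class to match.  */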
5876 return ((xvec_get_elf_backend_data (input)->s->elfclass
5877 == xvec_get_elf_backend_data (output)->s->elfclass)
5878 && _bfd_elf_relocs_compatible (input, output));
5879 }
5880
5881 static const struct bfd_elf_special_section
5882 elf_x86_64_special_sections[]=
5883 {
5884 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5885 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5886 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5887 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5888 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5889 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5890 { NULL, 0, 0, 0, 0 }
5891 };
5892
5893 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5894 #define TARGET_LITTLE_NAME "elf64-x86-64"
5895 #define ELF_ARCH bfd_arch_i386
5896 #define ELF_TARGET_ID X86_64_ELF_DATA
5897 #define ELF_MACHINE_CODE EM_X86_64
5898 #define ELF_MAXPAGESIZE 0x200000
5899 #define ELF_MINPAGESIZE 0x1000
5900 #define ELF_COMMONPAGESIZE 0x1000
5901
5902 #define elf_backend_can_gc_sections 1
5903 #define elf_backend_can_refcount 1
5904 #define elf_backend_want_got_plt 1
5905 #define elf_backend_plt_readonly 1
5906 #define elf_backend_want_plt_sym 0
5907 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5908 #define elf_backend_rela_normal 1
5909 #define elf_backend_plt_alignment 4
5910
5911 #define elf_info_to_howto elf_x86_64_info_to_howto
5912
5913 #define bfd_elf64_bfd_link_hash_table_create \
5914 elf_x86_64_link_hash_table_create
5915 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5916 #define bfd_elf64_bfd_reloc_name_lookup \
5917 elf_x86_64_reloc_name_lookup
5918
5919 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5920 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5921 #define elf_backend_check_relocs elf_x86_64_check_relocs
5922 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5923 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5924 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5925 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5926 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5927 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5928 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5929 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5930 #ifdef CORE_HEADER
5931 #define elf_backend_write_core_note elf_x86_64_write_core_note
5932 #endif
5933 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5934 #define elf_backend_relocate_section elf_x86_64_relocate_section
5935 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5936 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5937 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5938 #define elf_backend_plt_sym_val elf_x86_64_plt_sym_val
5939 #define elf_backend_object_p elf64_x86_64_elf_object_p
5940 #define bfd_elf64_mkobject elf_x86_64_mkobject
5941 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5942
5943 #define elf_backend_section_from_shdr \
5944 elf_x86_64_section_from_shdr
5945
5946 #define elf_backend_section_from_bfd_section \
5947 elf_x86_64_elf_section_from_bfd_section
5948 #define elf_backend_add_symbol_hook \
5949 elf_x86_64_add_symbol_hook
5950 #define elf_backend_symbol_processing \
5951 elf_x86_64_symbol_processing
5952 #define elf_backend_common_section_index \
5953 elf_x86_64_common_section_index
5954 #define elf_backend_common_section \
5955 elf_x86_64_common_section
5956 #define elf_backend_common_definition \
5957 elf_x86_64_common_definition
5958 #define elf_backend_merge_symbol \
5959 elf_x86_64_merge_symbol
5960 #define elf_backend_special_sections \
5961 elf_x86_64_special_sections
5962 #define elf_backend_additional_program_headers \
5963 elf_x86_64_additional_program_headers
5964 #define elf_backend_hash_symbol \
5965 elf_x86_64_hash_symbol
5966
5967 #include "elf64-target.h"
5968
5969 /* FreeBSD support. */
5970
5971 #undef TARGET_LITTLE_SYM
5972 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5973 #undef TARGET_LITTLE_NAME
5974 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5975
5976 #undef ELF_OSABI
5977 #define ELF_OSABI ELFOSABI_FREEBSD
5978
5979 #undef elf64_bed
5980 #define elf64_bed elf64_x86_64_fbsd_bed
5981
5982 #include "elf64-target.h"
5983
5984 /* Solaris 2 support. */
5985
5986 #undef TARGET_LITTLE_SYM
5987 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5988 #undef TARGET_LITTLE_NAME
5989 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5990
5991 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5992 objects won't be recognized. */
5993 #undef ELF_OSABI
5994
5995 #undef elf64_bed
5996 #define elf64_bed elf64_x86_64_sol2_bed
5997
5998 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5999 boundary. */
6000 #undef elf_backend_static_tls_alignment
6001 #define elf_backend_static_tls_alignment 16
6002
6003 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6004
6005 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6006 File, p.63. */
6007 #undef elf_backend_want_plt_sym
6008 #define elf_backend_want_plt_sym 1
6009
6010 #include "elf64-target.h"
6011
6012 #undef bfd_elf64_get_synthetic_symtab
6013
6014 /* Native Client support. */
6015
6016 static bfd_boolean
6017 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6018 {
6019 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6020 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6021 return TRUE;
6022 }
6023
6024 #undef TARGET_LITTLE_SYM
6025 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6026 #undef TARGET_LITTLE_NAME
6027 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6028 #undef elf64_bed
6029 #define elf64_bed elf64_x86_64_nacl_bed
6030
6031 #undef ELF_MAXPAGESIZE
6032 #undef ELF_MINPAGESIZE
6033 #undef ELF_COMMONPAGESIZE
6034 #define ELF_MAXPAGESIZE 0x10000
6035 #define ELF_MINPAGESIZE 0x10000
6036 #define ELF_COMMONPAGESIZE 0x10000
6037
6038 /* Restore defaults. */
6039 #undef ELF_OSABI
6040 #undef elf_backend_static_tls_alignment
6041 #undef elf_backend_want_plt_sym
6042 #define elf_backend_want_plt_sym 0
6043
6044 /* NaCl uses substantially different PLT entries for the same effects. */
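/* In particular, indirect branches must be masked to a 32-byte bundle
   boundary and made relative to the sandbox base in %r15, hence the
   "and $-32, %r11d; add %r15, %r11; jmpq *%r11" sequence below. */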
6045
6046 #undef elf_backend_plt_alignment
6047 #define elf_backend_plt_alignment 5
6048 #define NACL_PLT_ENTRY_SIZE 64
6049 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6050
6051 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6052 {
6053 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6054 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6055 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6056 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6057 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6058
6059 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6060 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6061
6062 /* 32 bytes of nop to pad out to the standard size. */
6063 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6064 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6065 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6066 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6067 0x66, /* excess data32 prefix */
6068 0x90 /* nop */
6069 };
6070
6071 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6072 {
6073 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6074 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6075 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6076 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6077
6078 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6079 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6080 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6081
6082 /* Lazy GOT entries point here (32-byte aligned). */
6083 0x68, /* pushq immediate */
6084 0, 0, 0, 0, /* replaced with index into relocation table. */
6085 0xe9, /* jmp relative */
6086 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6087
6088 /* 22 bytes of nop to pad out to the standard size. */
6089 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6090 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6091 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6092 };
6093
6094 /* .eh_frame covering the .plt section. */
6095
6096 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6097 {
6098 #if (PLT_CIE_LENGTH != 20 \
6099 || PLT_FDE_LENGTH != 36 \
6100 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6101 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6102 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6103 #endif
6104 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6105 0, 0, 0, 0, /* CIE ID */
6106 1, /* CIE version */
6107 'z', 'R', 0, /* Augmentation string */
6108 1, /* Code alignment factor */
6109 0x78, /* Data alignment factor */
6110 16, /* Return address column */
6111 1, /* Augmentation size */
6112 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6113 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6114 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6115 DW_CFA_nop, DW_CFA_nop,
6116
6117 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6118 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6119 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6120 0, 0, 0, 0, /* .plt size goes here */
6121 0, /* Augmentation size */
6122 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6123 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6124 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6125 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6126 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6127 13, /* Block length */
6128 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6129 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6130 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6131 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6132 DW_CFA_nop, DW_CFA_nop
6133 };
6134
6135 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6136 {
6137 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6138 elf_x86_64_nacl_plt_entry, /* plt_entry */
6139 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6140 2, /* plt0_got1_offset */
6141 9, /* plt0_got2_offset */
6142 13, /* plt0_got2_insn_end */
6143 3, /* plt_got_offset */
6144 33, /* plt_reloc_offset */
6145 38, /* plt_plt_offset */
6146 7, /* plt_got_insn_size */
6147 42, /* plt_plt_insn_end */
6148 32, /* plt_lazy_offset */
6149 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6150 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6151 };
6152
6153 #undef elf_backend_arch_data
6154 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6155
6156 #undef elf_backend_object_p
6157 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6158 #undef elf_backend_modify_segment_map
6159 #define elf_backend_modify_segment_map nacl_modify_segment_map
6160 #undef elf_backend_modify_program_headers
6161 #define elf_backend_modify_program_headers nacl_modify_program_headers
6162 #undef elf_backend_final_write_processing
6163 #define elf_backend_final_write_processing nacl_final_write_processing
6164
6165 #include "elf64-target.h"
6166
6167 /* Native Client x32 support. */
6168
6169 static bfd_boolean
6170 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6171 {
6172 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6173 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6174 return TRUE;
6175 }
6176
6177 #undef TARGET_LITTLE_SYM
6178 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6179 #undef TARGET_LITTLE_NAME
6180 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6181 #undef elf32_bed
6182 #define elf32_bed elf32_x86_64_nacl_bed
6183
6184 #define bfd_elf32_bfd_link_hash_table_create \
6185 elf_x86_64_link_hash_table_create
6186 #define bfd_elf32_bfd_reloc_type_lookup \
6187 elf_x86_64_reloc_type_lookup
6188 #define bfd_elf32_bfd_reloc_name_lookup \
6189 elf_x86_64_reloc_name_lookup
6190 #define bfd_elf32_mkobject \
6191 elf_x86_64_mkobject
6192
6193 #undef elf_backend_object_p
6194 #define elf_backend_object_p \
6195 elf32_x86_64_nacl_elf_object_p
6196
6197 #undef elf_backend_bfd_from_remote_memory
6198 #define elf_backend_bfd_from_remote_memory \
6199 _bfd_elf32_bfd_from_remote_memory
6200
6201 #undef elf_backend_size_info
6202 #define elf_backend_size_info \
6203 _bfd_elf32_size_info
6204
6205 #include "elf32-target.h"
6206
6207 /* Restore defaults. */
6208 #undef elf_backend_object_p
6209 #define elf_backend_object_p elf64_x86_64_elf_object_p
6210 #undef elf_backend_bfd_from_remote_memory
6211 #undef elf_backend_size_info
6212 #undef elf_backend_modify_segment_map
6213 #undef elf_backend_modify_program_headers
6214 #undef elf_backend_final_write_processing
6215
6216 /* Intel L1OM support. */
6217
6218 static bfd_boolean
6219 elf64_l1om_elf_object_p (bfd *abfd)
6220 {
6221 /* Set the right machine number for an L1OM elf64 file. */
6222 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6223 return TRUE;
6224 }
6225
6226 #undef TARGET_LITTLE_SYM
6227 #define TARGET_LITTLE_SYM l1om_elf64_vec
6228 #undef TARGET_LITTLE_NAME
6229 #define TARGET_LITTLE_NAME "elf64-l1om"
6230 #undef ELF_ARCH
6231 #define ELF_ARCH bfd_arch_l1om
6232
6233 #undef ELF_MACHINE_CODE
6234 #define ELF_MACHINE_CODE EM_L1OM
6235
6236 #undef ELF_OSABI
6237
6238 #undef elf64_bed
6239 #define elf64_bed elf64_l1om_bed
6240
6241 #undef elf_backend_object_p
6242 #define elf_backend_object_p elf64_l1om_elf_object_p
6243
6244 /* Restore defaults. */
6245 #undef ELF_MAXPAGESIZE
6246 #undef ELF_MINPAGESIZE
6247 #undef ELF_COMMONPAGESIZE
6248 #define ELF_MAXPAGESIZE 0x200000
6249 #define ELF_MINPAGESIZE 0x1000
6250 #define ELF_COMMONPAGESIZE 0x1000
6251 #undef elf_backend_plt_alignment
6252 #define elf_backend_plt_alignment 4
6253 #undef elf_backend_arch_data
6254 #define elf_backend_arch_data &elf_x86_64_arch_bed
6255
6256 #include "elf64-target.h"
6257
6258 /* FreeBSD L1OM support. */
6259
6260 #undef TARGET_LITTLE_SYM
6261 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6262 #undef TARGET_LITTLE_NAME
6263 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6264
6265 #undef ELF_OSABI
6266 #define ELF_OSABI ELFOSABI_FREEBSD
6267
6268 #undef elf64_bed
6269 #define elf64_bed elf64_l1om_fbsd_bed
6270
6271 #include "elf64-target.h"
6272
6273 /* Intel K1OM support. */
6274
6275 static bfd_boolean
6276 elf64_k1om_elf_object_p (bfd *abfd)
6277 {
6278 /* Set the right machine number for a K1OM elf64 file. */
6279 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6280 return TRUE;
6281 }
6282
6283 #undef TARGET_LITTLE_SYM
6284 #define TARGET_LITTLE_SYM k1om_elf64_vec
6285 #undef TARGET_LITTLE_NAME
6286 #define TARGET_LITTLE_NAME "elf64-k1om"
6287 #undef ELF_ARCH
6288 #define ELF_ARCH bfd_arch_k1om
6289
6290 #undef ELF_MACHINE_CODE
6291 #define ELF_MACHINE_CODE EM_K1OM
6292
6293 #undef ELF_OSABI
6294
6295 #undef elf64_bed
6296 #define elf64_bed elf64_k1om_bed
6297
6298 #undef elf_backend_object_p
6299 #define elf_backend_object_p elf64_k1om_elf_object_p
6300
6301 #undef elf_backend_static_tls_alignment
6302
6303 #undef elf_backend_want_plt_sym
6304 #define elf_backend_want_plt_sym 0
6305
6306 #include "elf64-target.h"
6307
6308 /* FreeBSD K1OM support. */
6309
6310 #undef TARGET_LITTLE_SYM
6311 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6312 #undef TARGET_LITTLE_NAME
6313 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6314
6315 #undef ELF_OSABI
6316 #define ELF_OSABI ELFOSABI_FREEBSD
6317
6318 #undef elf64_bed
6319 #define elf64_bed elf64_k1om_fbsd_bed
6320
6321 #include "elf64-target.h"
6322
6323 /* 32bit x86-64 support. */
6324
6325 #undef TARGET_LITTLE_SYM
6326 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6327 #undef TARGET_LITTLE_NAME
6328 #define TARGET_LITTLE_NAME "elf32-x86-64"
6329 #undef elf32_bed
6330
6331 #undef ELF_ARCH
6332 #define ELF_ARCH bfd_arch_i386
6333
6334 #undef ELF_MACHINE_CODE
6335 #define ELF_MACHINE_CODE EM_X86_64
6336
6337 #undef ELF_OSABI
6338
6339 #undef elf_backend_object_p
6340 #define elf_backend_object_p \
6341 elf32_x86_64_elf_object_p
6342
6343 #undef elf_backend_bfd_from_remote_memory
6344 #define elf_backend_bfd_from_remote_memory \
6345 _bfd_elf32_bfd_from_remote_memory
6346
6347 #undef elf_backend_size_info
6348 #define elf_backend_size_info \
6349 _bfd_elf32_size_info
6350
6351 #include "elf32-target.h"