Issue an error for read-only segment with dynamic IFUNC relocations
bfd/elf64-x86-64.c (binutils-gdb.git)
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
46 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI for how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
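/* A sketch of how this template is used (the displacement values 8 and
16 above are placeholders): once the output sections are laid out they
become the PC-relative distances to GOT+8 and GOT+16.  By convention the
dynamic linker stores its link map for this object in GOT[1] and the
address of its lazy resolver in GOT[2], so PLT0 pushes the module
identification and then jumps into the resolver.  */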
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
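/* Lazy binding with the entry above works roughly as follows: the GOT
slot for the symbol initially points back at the "pushq" in the same
entry (see plt_lazy_offset below), so the first "jmpq" lands on the push
of the relocation index, which then branches to PLT0 and enters the
dynamic resolver; the resolver patches the GOT slot with the real
address, so later calls jump straight to the function.  */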
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
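/* With -z bndplt each symbol therefore gets two stubs: an entry in .plt
that pushes the relocation index and does a "bnd jmp" back to PLT0 for
lazy resolution, and an 8-byte entry in the second PLT (.plt.bnd,
created in check_relocs below) that does the "bnd jmpq" through the
GOT.  Keeping a BND prefix on every branch preserves the MPX bound
registers across calls that go through the PLT.  */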
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
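/* The DW_CFA_def_cfa_expression above evaluates, roughly, to
     CFA = %rsp + 8 + ((((%rip & 15) >= 11) ? 1 : 0) << 3)
so within each 16-byte PLT entry the CFA is %rsp + 8 until the push of
the relocation index has executed, and %rsp + 16 from instruction
offset 11 onwards.  */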
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
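/* These offsets follow the byte layout of elf_x86_64_plt_entry above:
bytes 2-5 are the GOT displacement at the end of the 6-byte jmp
(plt_got_offset/plt_got_insn_size), byte 6 is the pushq that a lazily
bound GOT entry initially points at (plt_lazy_offset), bytes 7-10 hold
the relocation index (plt_reloc_offset), and bytes 12-15 hold the branch
back to PLT0 (plt_plt_offset), whose instruction ends at offset 16
(plt_plt_insn_end).  */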
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
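/* A symbol may be referenced through both general-dynamic and
TLS-descriptor code sequences; check_relocs below merges the two, so
tls_type can end up as (GOT_TLS_GD | GOT_TLS_GDESC), which is why the
GOT_TLS_*_P macros above treat that combination as both GD and
GDESC.  */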
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols. We reuse indx and dynstr_index for the local
920 symbol hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from the GD access model. For 64-bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transition to a different access model. For 32-bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transition to a different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq $rbx, %rax
1254 call *%rax. */
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from the LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transition to a different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq $rbx, %rax
1297 call *%rax. */
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
1340
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
1365 val = bfd_get_8 (abfd, contents + offset - 1);
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
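/* In outline, for an executable the transitions made here are: GD,
GDesc and IE against a symbol that is known to resolve locally become
LE (R_X86_64_TPOFF32), GD and GDesc against other symbols become IE
(R_X86_64_GOTTPOFF), and LD becomes LE; when building a shared object
no transition is made.  */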
1409
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
1531
1532 /* Rename some of the generic section flags to better document how they
1533 are used here. */
1534 #define need_convert_mov_to_lea sec_flg0
1535
1536 /* Look through the relocs for a section during the first phase, and
1537 calculate needed space in the global offset table, procedure
1538 linkage table, and dynamic reloc sections. */
1539
1540 static bfd_boolean
1541 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1542 asection *sec,
1543 const Elf_Internal_Rela *relocs)
1544 {
1545 struct elf_x86_64_link_hash_table *htab;
1546 Elf_Internal_Shdr *symtab_hdr;
1547 struct elf_link_hash_entry **sym_hashes;
1548 const Elf_Internal_Rela *rel;
1549 const Elf_Internal_Rela *rel_end;
1550 asection *sreloc;
1551 bfd_boolean use_plt_got;
1552
1553 if (info->relocatable)
1554 return TRUE;
1555
1556 BFD_ASSERT (is_x86_64_elf (abfd));
1557
1558 htab = elf_x86_64_hash_table (info);
1559 if (htab == NULL)
1560 return FALSE;
1561
1562 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1563
1564 symtab_hdr = &elf_symtab_hdr (abfd);
1565 sym_hashes = elf_sym_hashes (abfd);
1566
1567 sreloc = NULL;
1568
1569 rel_end = relocs + sec->reloc_count;
1570 for (rel = relocs; rel < rel_end; rel++)
1571 {
1572 unsigned int r_type;
1573 unsigned long r_symndx;
1574 struct elf_link_hash_entry *h;
1575 Elf_Internal_Sym *isym;
1576 const char *name;
1577 bfd_boolean size_reloc;
1578
1579 r_symndx = htab->r_sym (rel->r_info);
1580 r_type = ELF32_R_TYPE (rel->r_info);
1581
1582 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1583 {
1584 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1585 abfd, r_symndx);
1586 return FALSE;
1587 }
1588
1589 if (r_symndx < symtab_hdr->sh_info)
1590 {
1591 /* A local symbol. */
1592 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1593 abfd, r_symndx);
1594 if (isym == NULL)
1595 return FALSE;
1596
1597 /* Check relocation against local STT_GNU_IFUNC symbol. */
1598 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1599 {
1600 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1601 TRUE);
1602 if (h == NULL)
1603 return FALSE;
1604
1605 /* Fake a STT_GNU_IFUNC symbol. */
1606 h->type = STT_GNU_IFUNC;
1607 h->def_regular = 1;
1608 h->ref_regular = 1;
1609 h->forced_local = 1;
1610 h->root.type = bfd_link_hash_defined;
1611 }
1612 else
1613 h = NULL;
1614 }
1615 else
1616 {
1617 isym = NULL;
1618 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1619 while (h->root.type == bfd_link_hash_indirect
1620 || h->root.type == bfd_link_hash_warning)
1621 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1622 }
1623
1624 /* Check invalid x32 relocations. */
1625 if (!ABI_64_P (abfd))
1626 switch (r_type)
1627 {
1628 default:
1629 break;
1630
1631 case R_X86_64_DTPOFF64:
1632 case R_X86_64_TPOFF64:
1633 case R_X86_64_PC64:
1634 case R_X86_64_GOTOFF64:
1635 case R_X86_64_GOT64:
1636 case R_X86_64_GOTPCREL64:
1637 case R_X86_64_GOTPC64:
1638 case R_X86_64_GOTPLT64:
1639 case R_X86_64_PLTOFF64:
1640 {
1641 if (h)
1642 name = h->root.root.string;
1643 else
1644 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1645 NULL);
1646 (*_bfd_error_handler)
1647 (_("%B: relocation %s against symbol `%s' isn't "
1648 "supported in x32 mode"), abfd,
1649 x86_64_elf_howto_table[r_type].name, name);
1650 bfd_set_error (bfd_error_bad_value);
1651 return FALSE;
1652 }
1653 break;
1654 }
1655
1656 if (h != NULL)
1657 {
1658 /* Create the ifunc sections for static executables. If we
1659 never see an indirect function symbol nor are we building
1660 a static executable, those sections will be empty and
1661 won't appear in the output. */
1662 switch (r_type)
1663 {
1664 default:
1665 break;
1666
1667 case R_X86_64_PC32_BND:
1668 case R_X86_64_PLT32_BND:
1669 case R_X86_64_PC32:
1670 case R_X86_64_PLT32:
1671 case R_X86_64_32:
1672 case R_X86_64_64:
1673 /* MPX PLT is supported only if elf_x86_64_arch_bed
1674 is used in 64-bit mode. */
1675 if (ABI_64_P (abfd)
1676 && info->bndplt
1677 && (get_elf_x86_64_backend_data (abfd)
1678 == &elf_x86_64_arch_bed))
1679 {
1680 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1681
1682 /* Create the second PLT for Intel MPX support. */
1683 if (htab->plt_bnd == NULL)
1684 {
1685 unsigned int plt_bnd_align;
1686 const struct elf_backend_data *bed;
1687
1688 bed = get_elf_backend_data (info->output_bfd);
1689 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1690 && (sizeof (elf_x86_64_bnd_plt2_entry)
1691 == sizeof (elf_x86_64_legacy_plt2_entry)));
1692 plt_bnd_align = 3;
1693
1694 if (htab->elf.dynobj == NULL)
1695 htab->elf.dynobj = abfd;
1696 htab->plt_bnd
1697 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1698 ".plt.bnd",
1699 (bed->dynamic_sec_flags
1700 | SEC_ALLOC
1701 | SEC_CODE
1702 | SEC_LOAD
1703 | SEC_READONLY));
1704 if (htab->plt_bnd == NULL
1705 || !bfd_set_section_alignment (htab->elf.dynobj,
1706 htab->plt_bnd,
1707 plt_bnd_align))
1708 return FALSE;
1709 }
1710 }
1711
1712 case R_X86_64_32S:
1713 case R_X86_64_PC64:
1714 case R_X86_64_GOTPCREL:
1715 case R_X86_64_GOTPCREL64:
1716 if (htab->elf.dynobj == NULL)
1717 htab->elf.dynobj = abfd;
1718 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1719 return FALSE;
1720 break;
1721 }
1722
1723 /* It is referenced by a non-shared object. */
1724 h->ref_regular = 1;
1725 h->root.non_ir_ref = 1;
1726
1727 if (h->type == STT_GNU_IFUNC)
1728 elf_tdata (info->output_bfd)->has_gnu_symbols
1729 |= elf_gnu_symbol_ifunc;
1730 }
1731
1732 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1733 symtab_hdr, sym_hashes,
1734 &r_type, GOT_UNKNOWN,
1735 rel, rel_end, h, r_symndx))
1736 return FALSE;
1737
1738 switch (r_type)
1739 {
1740 case R_X86_64_TLSLD:
1741 htab->tls_ld_got.refcount += 1;
1742 goto create_got;
1743
1744 case R_X86_64_TPOFF32:
1745 if (!info->executable && ABI_64_P (abfd))
1746 {
1747 if (h)
1748 name = h->root.root.string;
1749 else
1750 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1751 NULL);
1752 (*_bfd_error_handler)
1753 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1754 abfd,
1755 x86_64_elf_howto_table[r_type].name, name);
1756 bfd_set_error (bfd_error_bad_value);
1757 return FALSE;
1758 }
1759 break;
1760
1761 case R_X86_64_GOTTPOFF:
1762 if (!info->executable)
1763 info->flags |= DF_STATIC_TLS;
1764 /* Fall through */
1765
1766 case R_X86_64_GOT32:
1767 case R_X86_64_GOTPCREL:
1768 case R_X86_64_TLSGD:
1769 case R_X86_64_GOT64:
1770 case R_X86_64_GOTPCREL64:
1771 case R_X86_64_GOTPLT64:
1772 case R_X86_64_GOTPC32_TLSDESC:
1773 case R_X86_64_TLSDESC_CALL:
1774 /* This symbol requires a global offset table entry. */
1775 {
1776 int tls_type, old_tls_type;
1777
1778 switch (r_type)
1779 {
1780 default: tls_type = GOT_NORMAL; break;
1781 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1782 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1783 case R_X86_64_GOTPC32_TLSDESC:
1784 case R_X86_64_TLSDESC_CALL:
1785 tls_type = GOT_TLS_GDESC; break;
1786 }
1787
1788 if (h != NULL)
1789 {
1790 h->got.refcount += 1;
1791 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1792 }
1793 else
1794 {
1795 bfd_signed_vma *local_got_refcounts;
1796
1797 /* This is a global offset table entry for a local symbol. */
1798 local_got_refcounts = elf_local_got_refcounts (abfd);
1799 if (local_got_refcounts == NULL)
1800 {
1801 bfd_size_type size;
1802
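/* A single allocation holds three parallel per-symbol arrays:
   GOT refcounts (bfd_signed_vma), TLSDESC GOT offsets (bfd_vma)
   and GOT TLS types (char).  */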
1803 size = symtab_hdr->sh_info;
1804 size *= sizeof (bfd_signed_vma)
1805 + sizeof (bfd_vma) + sizeof (char);
1806 local_got_refcounts = ((bfd_signed_vma *)
1807 bfd_zalloc (abfd, size));
1808 if (local_got_refcounts == NULL)
1809 return FALSE;
1810 elf_local_got_refcounts (abfd) = local_got_refcounts;
1811 elf_x86_64_local_tlsdesc_gotent (abfd)
1812 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1813 elf_x86_64_local_got_tls_type (abfd)
1814 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1815 }
1816 local_got_refcounts[r_symndx] += 1;
1817 old_tls_type
1818 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1819 }
1820
1821 /* If a TLS symbol is accessed using IE at least once,
1822 there is no point in using a dynamic model for it. */
1823 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1824 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1825 || tls_type != GOT_TLS_IE))
1826 {
1827 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1828 tls_type = old_tls_type;
1829 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1830 && GOT_TLS_GD_ANY_P (tls_type))
1831 tls_type |= old_tls_type;
1832 else
1833 {
1834 if (h)
1835 name = h->root.root.string;
1836 else
1837 name = bfd_elf_sym_name (abfd, symtab_hdr,
1838 isym, NULL);
1839 (*_bfd_error_handler)
1840 (_("%B: '%s' accessed both as normal and thread local symbol"),
1841 abfd, name);
1842 bfd_set_error (bfd_error_bad_value);
1843 return FALSE;
1844 }
1845 }
1846
1847 if (old_tls_type != tls_type)
1848 {
1849 if (h != NULL)
1850 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1851 else
1852 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1853 }
1854 }
1855 /* Fall through */
1856
1857 case R_X86_64_GOTOFF64:
1858 case R_X86_64_GOTPC32:
1859 case R_X86_64_GOTPC64:
1860 create_got:
1861 if (htab->elf.sgot == NULL)
1862 {
1863 if (htab->elf.dynobj == NULL)
1864 htab->elf.dynobj = abfd;
1865 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1866 info))
1867 return FALSE;
1868 }
1869 break;
1870
1871 case R_X86_64_PLT32:
1872 case R_X86_64_PLT32_BND:
1873 /* This symbol requires a procedure linkage table entry. We
1874 actually build the entry in adjust_dynamic_symbol,
1875 because this might be a case of linking PIC code which is
1876 never referenced by a dynamic object, in which case we
1877 don't need to generate a procedure linkage table entry
1878 after all. */
1879
1880 /* If this is a local symbol, we resolve it directly without
1881 creating a procedure linkage table entry. */
1882 if (h == NULL)
1883 continue;
1884
1885 h->needs_plt = 1;
1886 h->plt.refcount += 1;
1887 break;
1888
1889 case R_X86_64_PLTOFF64:
1890 /* This tries to form the 'address' of a function relative
1891 to GOT. For global symbols we need a PLT entry. */
1892 if (h != NULL)
1893 {
1894 h->needs_plt = 1;
1895 h->plt.refcount += 1;
1896 }
1897 goto create_got;
1898
1899 case R_X86_64_SIZE32:
1900 case R_X86_64_SIZE64:
1901 size_reloc = TRUE;
1902 goto do_size;
1903
1904 case R_X86_64_32:
1905 if (!ABI_64_P (abfd))
1906 goto pointer;
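/* Fall through: in 64-bit mode R_X86_64_32 gets the same
   shared-library check as the other absolute relocations.  */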
1907 case R_X86_64_8:
1908 case R_X86_64_16:
1909 case R_X86_64_32S:
1910 /* Let's help debug shared library creation. These relocs
1911 cannot be used in shared libs. Don't error out for
1912 sections we don't care about, such as debug sections or
1913 non-constant sections. */
1914 if (info->shared
1915 && (sec->flags & SEC_ALLOC) != 0
1916 && (sec->flags & SEC_READONLY) != 0)
1917 {
1918 if (h)
1919 name = h->root.root.string;
1920 else
1921 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1922 (*_bfd_error_handler)
1923 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1924 abfd, x86_64_elf_howto_table[r_type].name, name);
1925 bfd_set_error (bfd_error_bad_value);
1926 return FALSE;
1927 }
1928 /* Fall through. */
1929
1930 case R_X86_64_PC8:
1931 case R_X86_64_PC16:
1932 case R_X86_64_PC32:
1933 case R_X86_64_PC32_BND:
1934 case R_X86_64_PC64:
1935 case R_X86_64_64:
1936 pointer:
1937 if (h != NULL && info->executable)
1938 {
1939 /* If this reloc is in a read-only section, we might
1940 need a copy reloc. We can't check reliably at this
1941 stage whether the section is read-only, as input
1942 sections have not yet been mapped to output sections.
1943 Tentatively set the flag for now, and correct in
1944 adjust_dynamic_symbol. */
1945 h->non_got_ref = 1;
1946
1947 /* We may need a .plt entry if the function this reloc
1948 refers to is in a shared lib. */
1949 h->plt.refcount += 1;
1950 if (r_type != R_X86_64_PC32
1951 && r_type != R_X86_64_PC32_BND
1952 && r_type != R_X86_64_PC64)
1953 h->pointer_equality_needed = 1;
1954 }
1955
1956 size_reloc = FALSE;
1957 do_size:
1958 /* If we are creating a shared library, and this is a reloc
1959 against a global symbol, or a non PC relative reloc
1960 against a local symbol, then we need to copy the reloc
1961 into the shared library. However, if we are linking with
1962 -Bsymbolic, we do not need to copy a reloc against a
1963 global symbol which is defined in an object we are
1964 including in the link (i.e., DEF_REGULAR is set). At
1965 this point we have not seen all the input files, so it is
1966 possible that DEF_REGULAR is not set now but will be set
1967 later (it is never cleared). In case of a weak definition,
1968 DEF_REGULAR may be cleared later by a strong definition in
1969 a shared library. We account for that possibility below by
1970 storing information in the dyn_relocs field of the hash
1971 table entry. A similar situation occurs when creating
1972 shared libraries and symbol visibility changes render the
1973 symbol local.
1974
1975 If on the other hand, we are creating an executable, we
1976 may need to keep relocations for symbols satisfied by a
1977 dynamic library if we manage to avoid copy relocs for the
1978 symbol. */
1979 if ((info->shared
1980 && (sec->flags & SEC_ALLOC) != 0
1981 && (! IS_X86_64_PCREL_TYPE (r_type)
1982 || (h != NULL
1983 && (! SYMBOLIC_BIND (info, h)
1984 || h->root.type == bfd_link_hash_defweak
1985 || !h->def_regular))))
1986 || (ELIMINATE_COPY_RELOCS
1987 && !info->shared
1988 && (sec->flags & SEC_ALLOC) != 0
1989 && h != NULL
1990 && (h->root.type == bfd_link_hash_defweak
1991 || !h->def_regular)))
1992 {
1993 struct elf_dyn_relocs *p;
1994 struct elf_dyn_relocs **head;
1995
1996 /* We must copy these reloc types into the output file.
1997 Create a reloc section in dynobj and make room for
1998 this reloc. */
1999 if (sreloc == NULL)
2000 {
2001 if (htab->elf.dynobj == NULL)
2002 htab->elf.dynobj = abfd;
2003
2004 sreloc = _bfd_elf_make_dynamic_reloc_section
2005 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2006 abfd, /*rela?*/ TRUE);
2007
2008 if (sreloc == NULL)
2009 return FALSE;
2010 }
2011
2012 /* If this is a global symbol, we count the number of
2013 relocations we need for this symbol. */
2014 if (h != NULL)
2015 {
2016 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2017 }
2018 else
2019 {
2020 /* Track dynamic relocs needed for local syms too.
2021 We really need local syms available to do this
2022 easily. Oh well. */
2023 asection *s;
2024 void **vpp;
2025
2026 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2027 abfd, r_symndx);
2028 if (isym == NULL)
2029 return FALSE;
2030
2031 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2032 if (s == NULL)
2033 s = sec;
2034
2035 /* Beware of type punned pointers vs strict aliasing
2036 rules. */
2037 vpp = &(elf_section_data (s)->local_dynrel);
2038 head = (struct elf_dyn_relocs **)vpp;
2039 }
2040
2041 p = *head;
2042 if (p == NULL || p->sec != sec)
2043 {
2044 bfd_size_type amt = sizeof *p;
2045
2046 p = ((struct elf_dyn_relocs *)
2047 bfd_alloc (htab->elf.dynobj, amt));
2048 if (p == NULL)
2049 return FALSE;
2050 p->next = *head;
2051 *head = p;
2052 p->sec = sec;
2053 p->count = 0;
2054 p->pc_count = 0;
2055 }
2056
2057 p->count += 1;
2058 /* Count size relocation as PC-relative relocation. */
2059 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2060 p->pc_count += 1;
2061 }
2062 break;
2063
2064 /* This relocation describes the C++ object vtable hierarchy.
2065 Reconstruct it for later use during GC. */
2066 case R_X86_64_GNU_VTINHERIT:
2067 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2068 return FALSE;
2069 break;
2070
2071 /* This relocation describes which C++ vtable entries are actually
2072 used. Record for later use during GC. */
2073 case R_X86_64_GNU_VTENTRY:
2074 BFD_ASSERT (h != NULL);
2075 if (h != NULL
2076 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2077 return FALSE;
2078 break;
2079
2080 default:
2081 break;
2082 }
2083
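/* Create the .plt.got section lazily, the first time we see a
   symbol that can use a GOT PLT entry: either the link is
   -z now and pointer equality is not needed, or the symbol is
   also referenced through the GOT.  */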
2084 if (use_plt_got
2085 && h != NULL
2086 && h->plt.refcount > 0
2087 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2088 || h->got.refcount > 0)
2089 && htab->plt_got == NULL)
2090 {
2091 /* Create the GOT procedure linkage table. */
2092 unsigned int plt_got_align;
2093 const struct elf_backend_data *bed;
2094
2095 bed = get_elf_backend_data (info->output_bfd);
2096 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2097 && (sizeof (elf_x86_64_bnd_plt2_entry)
2098 == sizeof (elf_x86_64_legacy_plt2_entry)));
2099 plt_got_align = 3;
2100
2101 if (htab->elf.dynobj == NULL)
2102 htab->elf.dynobj = abfd;
2103 htab->plt_got
2104 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2105 ".plt.got",
2106 (bed->dynamic_sec_flags
2107 | SEC_ALLOC
2108 | SEC_CODE
2109 | SEC_LOAD
2110 | SEC_READONLY));
2111 if (htab->plt_got == NULL
2112 || !bfd_set_section_alignment (htab->elf.dynobj,
2113 htab->plt_got,
2114 plt_got_align))
2115 return FALSE;
2116 }
2117
2118 if (r_type == R_X86_64_GOTPCREL
2119 && (h == NULL || h->type != STT_GNU_IFUNC))
2120 sec->need_convert_mov_to_lea = 1;
2121 }
2122
2123 return TRUE;
2124 }
2125
2126 /* Return the section that should be marked against GC for a given
2127 relocation. */
2128
2129 static asection *
2130 elf_x86_64_gc_mark_hook (asection *sec,
2131 struct bfd_link_info *info,
2132 Elf_Internal_Rela *rel,
2133 struct elf_link_hash_entry *h,
2134 Elf_Internal_Sym *sym)
2135 {
2136 if (h != NULL)
2137 switch (ELF32_R_TYPE (rel->r_info))
2138 {
2139 case R_X86_64_GNU_VTINHERIT:
2140 case R_X86_64_GNU_VTENTRY:
2141 return NULL;
2142 }
2143
2144 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2145 }
2146
2147 /* Update the got entry reference counts for the section being removed. */
2148
2149 static bfd_boolean
2150 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2151 asection *sec,
2152 const Elf_Internal_Rela *relocs)
2153 {
2154 struct elf_x86_64_link_hash_table *htab;
2155 Elf_Internal_Shdr *symtab_hdr;
2156 struct elf_link_hash_entry **sym_hashes;
2157 bfd_signed_vma *local_got_refcounts;
2158 const Elf_Internal_Rela *rel, *relend;
2159
2160 if (info->relocatable)
2161 return TRUE;
2162
2163 htab = elf_x86_64_hash_table (info);
2164 if (htab == NULL)
2165 return FALSE;
2166
2167 elf_section_data (sec)->local_dynrel = NULL;
2168
2169 symtab_hdr = &elf_symtab_hdr (abfd);
2170 sym_hashes = elf_sym_hashes (abfd);
2171 local_got_refcounts = elf_local_got_refcounts (abfd);
2172
2173 htab = elf_x86_64_hash_table (info);
2174 relend = relocs + sec->reloc_count;
2175 for (rel = relocs; rel < relend; rel++)
2176 {
2177 unsigned long r_symndx;
2178 unsigned int r_type;
2179 struct elf_link_hash_entry *h = NULL;
2180
2181 r_symndx = htab->r_sym (rel->r_info);
2182 if (r_symndx >= symtab_hdr->sh_info)
2183 {
2184 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2185 while (h->root.type == bfd_link_hash_indirect
2186 || h->root.type == bfd_link_hash_warning)
2187 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2188 }
2189 else
2190 {
2191 /* A local symbol. */
2192 Elf_Internal_Sym *isym;
2193
2194 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2195 abfd, r_symndx);
2196
2197 /* Check relocation against local STT_GNU_IFUNC symbol. */
2198 if (isym != NULL
2199 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2200 {
2201 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2202 if (h == NULL)
2203 abort ();
2204 }
2205 }
2206
2207 if (h)
2208 {
2209 struct elf_x86_64_link_hash_entry *eh;
2210 struct elf_dyn_relocs **pp;
2211 struct elf_dyn_relocs *p;
2212
2213 eh = (struct elf_x86_64_link_hash_entry *) h;
2214
2215 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2216 if (p->sec == sec)
2217 {
2218 /* Everything must go for SEC. */
2219 *pp = p->next;
2220 break;
2221 }
2222 }
2223
2224 r_type = ELF32_R_TYPE (rel->r_info);
2225 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2226 symtab_hdr, sym_hashes,
2227 &r_type, GOT_UNKNOWN,
2228 rel, relend, h, r_symndx))
2229 return FALSE;
2230
2231 switch (r_type)
2232 {
2233 case R_X86_64_TLSLD:
2234 if (htab->tls_ld_got.refcount > 0)
2235 htab->tls_ld_got.refcount -= 1;
2236 break;
2237
2238 case R_X86_64_TLSGD:
2239 case R_X86_64_GOTPC32_TLSDESC:
2240 case R_X86_64_TLSDESC_CALL:
2241 case R_X86_64_GOTTPOFF:
2242 case R_X86_64_GOT32:
2243 case R_X86_64_GOTPCREL:
2244 case R_X86_64_GOT64:
2245 case R_X86_64_GOTPCREL64:
2246 case R_X86_64_GOTPLT64:
2247 if (h != NULL)
2248 {
2249 if (h->got.refcount > 0)
2250 h->got.refcount -= 1;
2251 if (h->type == STT_GNU_IFUNC)
2252 {
2253 if (h->plt.refcount > 0)
2254 h->plt.refcount -= 1;
2255 }
2256 }
2257 else if (local_got_refcounts != NULL)
2258 {
2259 if (local_got_refcounts[r_symndx] > 0)
2260 local_got_refcounts[r_symndx] -= 1;
2261 }
2262 break;
2263
2264 case R_X86_64_8:
2265 case R_X86_64_16:
2266 case R_X86_64_32:
2267 case R_X86_64_64:
2268 case R_X86_64_32S:
2269 case R_X86_64_PC8:
2270 case R_X86_64_PC16:
2271 case R_X86_64_PC32:
2272 case R_X86_64_PC32_BND:
2273 case R_X86_64_PC64:
2274 case R_X86_64_SIZE32:
2275 case R_X86_64_SIZE64:
2276 if (info->shared
2277 && (h == NULL || h->type != STT_GNU_IFUNC))
2278 break;
2279 /* Fall through. */
2280
2281 case R_X86_64_PLT32:
2282 case R_X86_64_PLT32_BND:
2283 case R_X86_64_PLTOFF64:
2284 if (h != NULL)
2285 {
2286 if (h->plt.refcount > 0)
2287 h->plt.refcount -= 1;
2288 }
2289 break;
2290
2291 default:
2292 break;
2293 }
2294 }
2295
2296 return TRUE;
2297 }
2298
2299 /* Adjust a symbol defined by a dynamic object and referenced by a
2300 regular object. The current definition is in some section of the
2301 dynamic object, but we're not including those sections. We have to
2302 change the definition to something the rest of the link can
2303 understand. */
2304
2305 static bfd_boolean
2306 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2307 struct elf_link_hash_entry *h)
2308 {
2309 struct elf_x86_64_link_hash_table *htab;
2310 asection *s;
2311 struct elf_x86_64_link_hash_entry *eh;
2312 struct elf_dyn_relocs *p;
2313
2314 /* STT_GNU_IFUNC symbol must go through PLT. */
2315 if (h->type == STT_GNU_IFUNC)
2316 {
2317 /* All local STT_GNU_IFUNC references must be treated as local
2318 calls via the local PLT. */
2319 if (h->ref_regular
2320 && SYMBOL_CALLS_LOCAL (info, h))
2321 {
2322 bfd_size_type pc_count = 0, count = 0;
2323 struct elf_dyn_relocs **pp;
2324
2325 eh = (struct elf_x86_64_link_hash_entry *) h;
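/* Drop the PC-relative dynamic relocations; local references
   are resolved through the local PLT instead.  If any
   relocations were seen at all, make sure a PLT entry is
   allocated.  */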
2326 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2327 {
2328 pc_count += p->pc_count;
2329 p->count -= p->pc_count;
2330 p->pc_count = 0;
2331 count += p->count;
2332 if (p->count == 0)
2333 *pp = p->next;
2334 else
2335 pp = &p->next;
2336 }
2337
2338 if (pc_count || count)
2339 {
2340 h->needs_plt = 1;
2341 h->non_got_ref = 1;
2342 if (h->plt.refcount <= 0)
2343 h->plt.refcount = 1;
2344 else
2345 h->plt.refcount += 1;
2346 }
2347 }
2348
2349 if (h->plt.refcount <= 0)
2350 {
2351 h->plt.offset = (bfd_vma) -1;
2352 h->needs_plt = 0;
2353 }
2354 return TRUE;
2355 }
2356
2357 /* If this is a function, put it in the procedure linkage table. We
2358 will fill in the contents of the procedure linkage table later,
2359 when we know the address of the .got section. */
2360 if (h->type == STT_FUNC
2361 || h->needs_plt)
2362 {
2363 if (h->plt.refcount <= 0
2364 || SYMBOL_CALLS_LOCAL (info, h)
2365 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2366 && h->root.type == bfd_link_hash_undefweak))
2367 {
2368 /* This case can occur if we saw a PLT32 reloc in an input
2369 file, but the symbol was never referred to by a dynamic
2370 object, or if all references were garbage collected. In
2371 such a case, we don't actually need to build a procedure
2372 linkage table, and we can just do a PC32 reloc instead. */
2373 h->plt.offset = (bfd_vma) -1;
2374 h->needs_plt = 0;
2375 }
2376
2377 return TRUE;
2378 }
2379 else
2380 /* It's possible that we incorrectly decided a .plt reloc was
2381 needed for an R_X86_64_PC32 reloc to a non-function sym in
2382 check_relocs. We can't decide accurately between function and
2383 non-function syms in check_relocs; objects loaded later in
2384 the link may change h->type. So fix it now. */
2385 h->plt.offset = (bfd_vma) -1;
2386
2387 /* If this is a weak symbol, and there is a real definition, the
2388 processor independent code will have arranged for us to see the
2389 real definition first, and we can just use the same value. */
2390 if (h->u.weakdef != NULL)
2391 {
2392 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2393 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2394 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2395 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2396 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2397 {
2398 eh = (struct elf_x86_64_link_hash_entry *) h;
2399 h->non_got_ref = h->u.weakdef->non_got_ref;
2400 eh->needs_copy = h->u.weakdef->needs_copy;
2401 }
2402 return TRUE;
2403 }
2404
2405 /* This is a reference to a symbol defined by a dynamic object which
2406 is not a function. */
2407
2408 /* If we are creating a shared library, we must presume that the
2409 only references to the symbol are via the global offset table.
2410 For such cases we need not do anything here; the relocations will
2411 be handled correctly by relocate_section. */
2412 if (!info->executable)
2413 return TRUE;
2414
2415 /* If there are no references to this symbol that do not use the
2416 GOT, we don't need to generate a copy reloc. */
2417 if (!h->non_got_ref)
2418 return TRUE;
2419
2420 /* If -z nocopyreloc was given, we won't generate them either. */
2421 if (info->nocopyreloc)
2422 {
2423 h->non_got_ref = 0;
2424 return TRUE;
2425 }
2426
2427 if (ELIMINATE_COPY_RELOCS)
2428 {
2429 eh = (struct elf_x86_64_link_hash_entry *) h;
2430 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2431 {
2432 s = p->sec->output_section;
2433 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2434 break;
2435 }
2436
2437 /* If we didn't find any dynamic relocs in read-only sections, then
2438 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2439 if (p == NULL)
2440 {
2441 h->non_got_ref = 0;
2442 return TRUE;
2443 }
2444 }
2445
2446 /* We must allocate the symbol in our .dynbss section, which will
2447 become part of the .bss section of the executable. There will be
2448 an entry for this symbol in the .dynsym section. The dynamic
2449 object will contain position independent code, so all references
2450 from the dynamic object to this symbol will go through the global
2451 offset table. The dynamic linker will use the .dynsym entry to
2452 determine the address it must put in the global offset table, so
2453 both the dynamic object and the regular object will refer to the
2454 same memory location for the variable. */
2455
2456 htab = elf_x86_64_hash_table (info);
2457 if (htab == NULL)
2458 return FALSE;
2459
2460 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2461 to copy the initial value out of the dynamic object and into the
2462 runtime process image. */
2463 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2464 {
2465 const struct elf_backend_data *bed;
2466 bed = get_elf_backend_data (info->output_bfd);
2467 htab->srelbss->size += bed->s->sizeof_rela;
2468 h->needs_copy = 1;
2469 }
2470
2471 s = htab->sdynbss;
2472
2473 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2474 }
2475
2476 /* Allocate space in .plt, .got and associated reloc sections for
2477 dynamic relocs. */
2478
2479 static bfd_boolean
2480 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2481 {
2482 struct bfd_link_info *info;
2483 struct elf_x86_64_link_hash_table *htab;
2484 struct elf_x86_64_link_hash_entry *eh;
2485 struct elf_dyn_relocs *p;
2486 const struct elf_backend_data *bed;
2487 unsigned int plt_entry_size;
2488
2489 if (h->root.type == bfd_link_hash_indirect)
2490 return TRUE;
2491
2492 eh = (struct elf_x86_64_link_hash_entry *) h;
2493
2494 info = (struct bfd_link_info *) inf;
2495 htab = elf_x86_64_hash_table (info);
2496 if (htab == NULL)
2497 return FALSE;
2498 bed = get_elf_backend_data (info->output_bfd);
2499 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2500
2501 /* We can't use the GOT PLT if pointer equality is needed since
2502 finish_dynamic_symbol won't clear the symbol value and the dynamic
2503 linker won't update the GOT slot. We will get into an infinite
2504 loop at run-time. */
2505 if (htab->plt_got != NULL
2506 && h->type != STT_GNU_IFUNC
2507 && !h->pointer_equality_needed
2508 && h->plt.refcount > 0
2509 && h->got.refcount > 0)
2510 {
2511 /* Don't use the regular PLT if there are both GOT and GOTPLT
2512 relocations. */
2513 h->plt.offset = (bfd_vma) -1;
2514
2515 /* Use the GOT PLT. */
2516 eh->plt_got.refcount = 1;
2517 }
2518
2519 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2520 here if it is defined and referenced in a non-shared object. */
2521 if (h->type == STT_GNU_IFUNC
2522 && h->def_regular)
2523 {
2524 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2525 &eh->dyn_relocs,
2526 plt_entry_size,
2527 plt_entry_size,
2528 GOT_ENTRY_SIZE))
2529 {
2530 asection *s = htab->plt_bnd;
2531 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2532 {
2533 /* Use the .plt.bnd section if it is created. */
2534 eh->plt_bnd.offset = s->size;
2535
2536 /* Make room for this entry in the .plt.bnd section. */
2537 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2538 }
2539
2540 return TRUE;
2541 }
2542 else
2543 return FALSE;
2544 }
2545 else if (htab->elf.dynamic_sections_created
2546 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2547 {
2548 bfd_boolean use_plt_got;
2549
2550 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2551 {
2552 /* Don't use the regular PLT for DF_BIND_NOW. */
2553 h->plt.offset = (bfd_vma) -1;
2554
2555 /* Use the GOT PLT. */
2556 h->got.refcount = 1;
2557 eh->plt_got.refcount = 1;
2558 }
2559
2560 use_plt_got = eh->plt_got.refcount > 0;
2561
2562 /* Make sure this symbol is output as a dynamic symbol.
2563 Undefined weak syms won't yet be marked as dynamic. */
2564 if (h->dynindx == -1
2565 && !h->forced_local)
2566 {
2567 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2568 return FALSE;
2569 }
2570
2571 if (info->shared
2572 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2573 {
2574 asection *s = htab->elf.splt;
2575 asection *bnd_s = htab->plt_bnd;
2576 asection *got_s = htab->plt_got;
2577
2578 /* If this is the first .plt entry, make room for the special
2579 first entry. The .plt section is used by prelink to undo
2580 prelinking for dynamic relocations. */
2581 if (s->size == 0)
2582 s->size = plt_entry_size;
2583
2584 if (use_plt_got)
2585 eh->plt_got.offset = got_s->size;
2586 else
2587 {
2588 h->plt.offset = s->size;
2589 if (bnd_s)
2590 eh->plt_bnd.offset = bnd_s->size;
2591 }
2592
2593 /* If this symbol is not defined in a regular file, and we are
2594 not generating a shared library, then set the symbol to this
2595 location in the .plt. This is required to make function
2596 pointers compare as equal between the normal executable and
2597 the shared library. */
2598 if (! info->shared
2599 && !h->def_regular)
2600 {
2601 if (use_plt_got)
2602 {
2603 /* We need to make a call to the entry of the GOT PLT
2604 instead of the regular PLT entry. */
2605 h->root.u.def.section = got_s;
2606 h->root.u.def.value = eh->plt_got.offset;
2607 }
2608 else
2609 {
2610 if (bnd_s)
2611 {
2612 /* We need to make a call to the entry of the second
2613 PLT instead of the regular PLT entry. */
2614 h->root.u.def.section = bnd_s;
2615 h->root.u.def.value = eh->plt_bnd.offset;
2616 }
2617 else
2618 {
2619 h->root.u.def.section = s;
2620 h->root.u.def.value = h->plt.offset;
2621 }
2622 }
2623 }
2624
2625 /* Make room for this entry. */
2626 if (use_plt_got)
2627 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2628 else
2629 {
2630 s->size += plt_entry_size;
2631 if (bnd_s)
2632 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2633
2634 /* We also need to make an entry in the .got.plt section,
2635 which will be placed in the .got section by the linker
2636 script. */
2637 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2638
2639 /* We also need to make an entry in the .rela.plt
2640 section. */
2641 htab->elf.srelplt->size += bed->s->sizeof_rela;
2642 htab->elf.srelplt->reloc_count++;
2643 }
2644 }
2645 else
2646 {
2647 h->plt.offset = (bfd_vma) -1;
2648 h->needs_plt = 0;
2649 }
2650 }
2651 else
2652 {
2653 h->plt.offset = (bfd_vma) -1;
2654 h->needs_plt = 0;
2655 }
2656
2657 eh->tlsdesc_got = (bfd_vma) -1;
2658
2659 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2660 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2661 if (h->got.refcount > 0
2662 && info->executable
2663 && h->dynindx == -1
2664 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2665 {
2666 h->got.offset = (bfd_vma) -1;
2667 }
2668 else if (h->got.refcount > 0)
2669 {
2670 asection *s;
2671 bfd_boolean dyn;
2672 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2673
2674 /* Make sure this symbol is output as a dynamic symbol.
2675 Undefined weak syms won't yet be marked as dynamic. */
2676 if (h->dynindx == -1
2677 && !h->forced_local)
2678 {
2679 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2680 return FALSE;
2681 }
2682
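/* A TLS descriptor takes two GOT entries in .got.plt: the
   resolver function pointer and its argument word.  */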
2683 if (GOT_TLS_GDESC_P (tls_type))
2684 {
2685 eh->tlsdesc_got = htab->elf.sgotplt->size
2686 - elf_x86_64_compute_jump_table_size (htab);
2687 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2688 h->got.offset = (bfd_vma) -2;
2689 }
2690 if (! GOT_TLS_GDESC_P (tls_type)
2691 || GOT_TLS_GD_P (tls_type))
2692 {
2693 s = htab->elf.sgot;
2694 h->got.offset = s->size;
2695 s->size += GOT_ENTRY_SIZE;
2696 if (GOT_TLS_GD_P (tls_type))
2697 s->size += GOT_ENTRY_SIZE;
2698 }
2699 dyn = htab->elf.dynamic_sections_created;
2700 /* R_X86_64_TLSGD needs one dynamic relocation if the symbol is
2701 local and two if it is global.
2702 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2703 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2704 || tls_type == GOT_TLS_IE)
2705 htab->elf.srelgot->size += bed->s->sizeof_rela;
2706 else if (GOT_TLS_GD_P (tls_type))
2707 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2708 else if (! GOT_TLS_GDESC_P (tls_type)
2709 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2710 || h->root.type != bfd_link_hash_undefweak)
2711 && (info->shared
2712 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2713 htab->elf.srelgot->size += bed->s->sizeof_rela;
2714 if (GOT_TLS_GDESC_P (tls_type))
2715 {
2716 htab->elf.srelplt->size += bed->s->sizeof_rela;
2717 htab->tlsdesc_plt = (bfd_vma) -1;
2718 }
2719 }
2720 else
2721 h->got.offset = (bfd_vma) -1;
2722
2723 if (eh->dyn_relocs == NULL)
2724 return TRUE;
2725
2726 /* In the shared -Bsymbolic case, discard space allocated for
2727 dynamic pc-relative relocs against symbols which turn out to be
2728 defined in regular objects. For the normal shared case, discard
2729 space for pc-relative relocs that have become local due to symbol
2730 visibility changes. */
2731
2732 if (info->shared)
2733 {
2734 /* Relocs that use pc_count are those that appear on a call
2735 insn, or certain REL relocs that can be generated via assembly.
2736 We want calls to protected symbols to resolve directly to the
2737 function rather than going via the plt. If people want
2738 function pointer comparisons to work as expected then they
2739 should avoid writing weird assembly. */
2740 if (SYMBOL_CALLS_LOCAL (info, h))
2741 {
2742 struct elf_dyn_relocs **pp;
2743
2744 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2745 {
2746 p->count -= p->pc_count;
2747 p->pc_count = 0;
2748 if (p->count == 0)
2749 *pp = p->next;
2750 else
2751 pp = &p->next;
2752 }
2753 }
2754
2755 /* Also discard relocs on undefined weak syms with non-default
2756 visibility. */
2757 if (eh->dyn_relocs != NULL)
2758 {
2759 if (h->root.type == bfd_link_hash_undefweak)
2760 {
2761 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2762 eh->dyn_relocs = NULL;
2763
2764 /* Make sure undefined weak symbols are output as dynamic
2765 symbols in PIEs. */
2766 else if (h->dynindx == -1
2767 && ! h->forced_local
2768 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2769 return FALSE;
2770 }
2771 /* For PIE, discard space for pc-relative relocs against
2772 symbols which turn out to need copy relocs. */
2773 else if (info->executable
2774 && (h->needs_copy || eh->needs_copy)
2775 && h->def_dynamic
2776 && !h->def_regular)
2777 {
2778 struct elf_dyn_relocs **pp;
2779
2780 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2781 {
2782 if (p->pc_count != 0)
2783 *pp = p->next;
2784 else
2785 pp = &p->next;
2786 }
2787 }
2788 }
2789 }
2790 else if (ELIMINATE_COPY_RELOCS)
2791 {
2792 /* For the non-shared case, discard space for relocs against
2793 symbols which turn out to need copy relocs or are not
2794 dynamic. */
2795
2796 if (!h->non_got_ref
2797 && ((h->def_dynamic
2798 && !h->def_regular)
2799 || (htab->elf.dynamic_sections_created
2800 && (h->root.type == bfd_link_hash_undefweak
2801 || h->root.type == bfd_link_hash_undefined))))
2802 {
2803 /* Make sure this symbol is output as a dynamic symbol.
2804 Undefined weak syms won't yet be marked as dynamic. */
2805 if (h->dynindx == -1
2806 && ! h->forced_local
2807 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2808 return FALSE;
2809
2810 /* If that succeeded, we know we'll be keeping all the
2811 relocs. */
2812 if (h->dynindx != -1)
2813 goto keep;
2814 }
2815
2816 eh->dyn_relocs = NULL;
2817
2818 keep: ;
2819 }
2820
2821 /* Finally, allocate space. */
2822 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2823 {
2824 asection * sreloc;
2825
2826 sreloc = elf_section_data (p->sec)->sreloc;
2827
2828 BFD_ASSERT (sreloc != NULL);
2829
2830 sreloc->size += p->count * bed->s->sizeof_rela;
2831 }
2832
2833 return TRUE;
2834 }
2835
2836 /* Allocate space in .plt, .got and associated reloc sections for
2837 local dynamic relocs. */
2838
2839 static bfd_boolean
2840 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2841 {
2842 struct elf_link_hash_entry *h
2843 = (struct elf_link_hash_entry *) *slot;
2844
2845 if (h->type != STT_GNU_IFUNC
2846 || !h->def_regular
2847 || !h->ref_regular
2848 || !h->forced_local
2849 || h->root.type != bfd_link_hash_defined)
2850 abort ();
2851
2852 return elf_x86_64_allocate_dynrelocs (h, inf);
2853 }
2854
2855 /* Find any dynamic relocs that apply to read-only sections. */
2856
2857 static bfd_boolean
2858 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2859 void * inf)
2860 {
2861 struct elf_x86_64_link_hash_entry *eh;
2862 struct elf_dyn_relocs *p;
2863
2864 /* Skip local IFUNC symbols. */
2865 if (h->forced_local && h->type == STT_GNU_IFUNC)
2866 return TRUE;
2867
2868 eh = (struct elf_x86_64_link_hash_entry *) h;
2869 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2870 {
2871 asection *s = p->sec->output_section;
2872
2873 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2874 {
2875 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2876
2877 info->flags |= DF_TEXTREL;
2878
2879 if ((info->warn_shared_textrel && info->shared)
2880 || info->error_textrel)
2881 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2882 p->sec->owner, h->root.root.string,
2883 p->sec);
2884
2885 /* Not an error, just cut short the traversal. */
2886 return FALSE;
2887 }
2888 }
2889 return TRUE;
2890 }
2891
2892 /* Convert
2893 mov foo@GOTPCREL(%rip), %reg
2894 to
2895 lea foo(%rip), %reg
2896 with the local symbol, foo. */
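/* For example (a sketch of the encodings involved; byte values
   assume a 64-bit register operand):
     48 8b 05 <rel32>   mov foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 <rel32>   lea foo(%rip), %rax
   i.e. only the 0x8b opcode byte is rewritten to 0x8d and the
   relocation is changed to R_X86_64_PC32.  */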
2897
2898 static bfd_boolean
2899 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2900 struct bfd_link_info *link_info)
2901 {
2902 Elf_Internal_Shdr *symtab_hdr;
2903 Elf_Internal_Rela *internal_relocs;
2904 Elf_Internal_Rela *irel, *irelend;
2905 bfd_byte *contents;
2906 struct elf_x86_64_link_hash_table *htab;
2907 bfd_boolean changed_contents;
2908 bfd_boolean changed_relocs;
2909 bfd_signed_vma *local_got_refcounts;
2910 bfd_vma maxpagesize;
2911
2912 /* Don't even try to convert non-ELF outputs. */
2913 if (!is_elf_hash_table (link_info->hash))
2914 return FALSE;
2915
2916 /* Nothing to do if there is no need or no output. */
2917 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2918 || sec->need_convert_mov_to_lea == 0
2919 || bfd_is_abs_section (sec->output_section))
2920 return TRUE;
2921
2922 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2923
2924 /* Load the relocations for this section. */
2925 internal_relocs = (_bfd_elf_link_read_relocs
2926 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2927 link_info->keep_memory));
2928 if (internal_relocs == NULL)
2929 return FALSE;
2930
2931 htab = elf_x86_64_hash_table (link_info);
2932 changed_contents = FALSE;
2933 changed_relocs = FALSE;
2934 local_got_refcounts = elf_local_got_refcounts (abfd);
2935 maxpagesize = get_elf_backend_data (abfd)->maxpagesize;
2936
2937 /* Get the section contents. */
2938 if (elf_section_data (sec)->this_hdr.contents != NULL)
2939 contents = elf_section_data (sec)->this_hdr.contents;
2940 else
2941 {
2942 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2943 goto error_return;
2944 }
2945
2946 irelend = internal_relocs + sec->reloc_count;
2947 for (irel = internal_relocs; irel < irelend; irel++)
2948 {
2949 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2950 unsigned int r_symndx = htab->r_sym (irel->r_info);
2951 unsigned int indx;
2952 struct elf_link_hash_entry *h;
2953 asection *tsec;
2954 char symtype;
2955 bfd_vma toff, roff;
2956 enum {
2957 none, local, global
2958 } convert_mov_to_lea;
2959 unsigned int opcode;
2960
2961 if (r_type != R_X86_64_GOTPCREL)
2962 continue;
2963
2964 roff = irel->r_offset;
2965
2966 if (roff < 2)
2967 continue;
2968
2969 opcode = bfd_get_8 (abfd, contents + roff - 2);
2970
2971 /* PR ld/18591: Don't convert R_X86_64_GOTPCREL relocation if it
2972 isn't for mov instruction. */
2973 if (opcode != 0x8b)
2974 continue;
2975
2976 tsec = NULL;
2977 convert_mov_to_lea = none;
2978
2979 /* Get the symbol referred to by the reloc. */
2980 if (r_symndx < symtab_hdr->sh_info)
2981 {
2982 Elf_Internal_Sym *isym;
2983
2984 /* Silence older GCC warning. */
2985 h = NULL;
2986
2987 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2988 abfd, r_symndx);
2989
2990 symtype = ELF_ST_TYPE (isym->st_info);
2991
2992 /* STT_GNU_IFUNC symbols must keep their R_X86_64_GOTPCREL relocation;
2993 also skip relocations against undefined symbols. */
2994 if (symtype != STT_GNU_IFUNC && isym->st_shndx != SHN_UNDEF)
2995 {
2996 if (isym->st_shndx == SHN_ABS)
2997 tsec = bfd_abs_section_ptr;
2998 else if (isym->st_shndx == SHN_COMMON)
2999 tsec = bfd_com_section_ptr;
3000 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
3001 tsec = &_bfd_elf_large_com_section;
3002 else
3003 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3004
3005 toff = isym->st_value;
3006 convert_mov_to_lea = local;
3007 }
3008 }
3009 else
3010 {
3011 indx = r_symndx - symtab_hdr->sh_info;
3012 h = elf_sym_hashes (abfd)[indx];
3013 BFD_ASSERT (h != NULL);
3014
3015 while (h->root.type == bfd_link_hash_indirect
3016 || h->root.type == bfd_link_hash_warning)
3017 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3018
3019 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
3020 avoid optimizing _DYNAMIC since ld.so may use its link-time
3021 address. */
3022 if (h->def_regular
3023 && h->type != STT_GNU_IFUNC
3024 && h != htab->elf.hdynamic
3025 && SYMBOL_REFERENCES_LOCAL (link_info, h))
3026 {
3027 tsec = h->root.u.def.section;
3028 toff = h->root.u.def.value;
3029 symtype = h->type;
3030 convert_mov_to_lea = global;
3031 }
3032 }
3033
3034 if (convert_mov_to_lea == none)
3035 continue;
3036
3037 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
3038 {
3039 /* At this stage in linking, no SEC_MERGE symbol has been
3040 adjusted, so all references to such symbols need to be
3041 passed through _bfd_merged_section_offset. (Later, in
3042 relocate_section, all SEC_MERGE symbols *except* for
3043 section symbols have been adjusted.)
3044
3045 gas may reduce relocations against symbols in SEC_MERGE
3046 sections to a relocation against the section symbol when
3047 the original addend was zero. When the reloc is against
3048 a section symbol we should include the addend in the
3049 offset passed to _bfd_merged_section_offset, since the
3050 location of interest is the original symbol. On the
3051 other hand, an access to "sym+addend" where "sym" is not
3052 a section symbol should not include the addend; such an
3053 access is presumed to be an offset from "sym"; the
3054 location of interest is just "sym". */
3055 if (symtype == STT_SECTION)
3056 toff += irel->r_addend;
3057
3058 toff = _bfd_merged_section_offset (abfd, &tsec,
3059 elf_section_data (tsec)->sec_info,
3060 toff);
3061
3062 if (symtype != STT_SECTION)
3063 toff += irel->r_addend;
3064 }
3065 else
3066 toff += irel->r_addend;
3067
3068 /* Don't convert if R_X86_64_PC32 relocation overflows. */
3069 if (tsec->output_section == sec->output_section)
3070 {
3071 if ((toff - roff + 0x80000000) > 0xffffffff)
3072 continue;
3073 }
3074 else
3075 {
3076 asection *asect;
3077 bfd_size_type size;
3078
3079 /* At this point, we don't know the load addresses of the TSEC
3080 and SEC sections. We estimate the distance between
3081 SEC and TSEC. */
3082 size = 0;
3083 for (asect = sec->output_section;
3084 asect != NULL && asect != tsec->output_section;
3085 asect = asect->next)
3086 {
3087 asection *i;
3088 for (i = asect->output_section->map_head.s;
3089 i != NULL;
3090 i = i->map_head.s)
3091 {
3092 size = align_power (size, i->alignment_power);
3093 size += i->size;
3094 }
3095 }
3096
3097 /* Don't convert R_X86_64_GOTPCREL if TSEC isn't placed after
3098 SEC. */
3099 if (asect == NULL)
3100 continue;
3101
3102 /* Take PT_GNU_RELRO segment into account by adding
3103 maxpagesize. */
3104 if ((toff + size + maxpagesize - roff + 0x80000000)
3105 > 0xffffffff)
3106 continue;
3107 }
3108
3109 bfd_put_8 (abfd, 0x8d, contents + roff - 2);
3110 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
3111 changed_contents = TRUE;
3112 changed_relocs = TRUE;
3113
3114 if (convert_mov_to_lea == local)
3115 {
3116 if (local_got_refcounts != NULL
3117 && local_got_refcounts[r_symndx] > 0)
3118 local_got_refcounts[r_symndx] -= 1;
3119 }
3120 else
3121 {
3122 if (h->got.refcount > 0)
3123 h->got.refcount -= 1;
3124 }
3125 }
3126
3127 if (contents != NULL
3128 && elf_section_data (sec)->this_hdr.contents != contents)
3129 {
3130 if (!changed_contents && !link_info->keep_memory)
3131 free (contents);
3132 else
3133 {
3134 /* Cache the section contents for elf_link_input_bfd. */
3135 elf_section_data (sec)->this_hdr.contents = contents;
3136 }
3137 }
3138
3139 if (elf_section_data (sec)->relocs != internal_relocs)
3140 {
3141 if (!changed_relocs)
3142 free (internal_relocs);
3143 else
3144 elf_section_data (sec)->relocs = internal_relocs;
3145 }
3146
3147 return TRUE;
3148
3149 error_return:
3150 if (contents != NULL
3151 && elf_section_data (sec)->this_hdr.contents != contents)
3152 free (contents);
3153 if (internal_relocs != NULL
3154 && elf_section_data (sec)->relocs != internal_relocs)
3155 free (internal_relocs);
3156 return FALSE;
3157 }
3158
3159 /* Set the sizes of the dynamic sections. */
3160
3161 static bfd_boolean
3162 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3163 struct bfd_link_info *info)
3164 {
3165 struct elf_x86_64_link_hash_table *htab;
3166 bfd *dynobj;
3167 asection *s;
3168 bfd_boolean relocs;
3169 bfd *ibfd;
3170 const struct elf_backend_data *bed;
3171
3172 htab = elf_x86_64_hash_table (info);
3173 if (htab == NULL)
3174 return FALSE;
3175 bed = get_elf_backend_data (output_bfd);
3176
3177 dynobj = htab->elf.dynobj;
3178 if (dynobj == NULL)
3179 abort ();
3180
3181 if (htab->elf.dynamic_sections_created)
3182 {
3183 /* Set the contents of the .interp section to the interpreter. */
3184 if (info->executable)
3185 {
3186 s = bfd_get_linker_section (dynobj, ".interp");
3187 if (s == NULL)
3188 abort ();
3189 s->size = htab->dynamic_interpreter_size;
3190 s->contents = (unsigned char *) htab->dynamic_interpreter;
3191 }
3192 }
3193
3194 /* Set up .got offsets for local syms, and space for local dynamic
3195 relocs. */
3196 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3197 {
3198 bfd_signed_vma *local_got;
3199 bfd_signed_vma *end_local_got;
3200 char *local_tls_type;
3201 bfd_vma *local_tlsdesc_gotent;
3202 bfd_size_type locsymcount;
3203 Elf_Internal_Shdr *symtab_hdr;
3204 asection *srel;
3205
3206 if (! is_x86_64_elf (ibfd))
3207 continue;
3208
3209 for (s = ibfd->sections; s != NULL; s = s->next)
3210 {
3211 struct elf_dyn_relocs *p;
3212
3213 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3214 return FALSE;
3215
3216 for (p = (struct elf_dyn_relocs *)
3217 (elf_section_data (s)->local_dynrel);
3218 p != NULL;
3219 p = p->next)
3220 {
3221 if (!bfd_is_abs_section (p->sec)
3222 && bfd_is_abs_section (p->sec->output_section))
3223 {
3224 /* Input section has been discarded, either because
3225 it is a copy of a linkonce section or due to
3226 linker script /DISCARD/, so we'll be discarding
3227 the relocs too. */
3228 }
3229 else if (p->count != 0)
3230 {
3231 srel = elf_section_data (p->sec)->sreloc;
3232 srel->size += p->count * bed->s->sizeof_rela;
3233 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3234 && (info->flags & DF_TEXTREL) == 0)
3235 {
3236 info->flags |= DF_TEXTREL;
3237 if ((info->warn_shared_textrel && info->shared)
3238 || info->error_textrel)
3239 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3240 p->sec->owner, p->sec);
3241 }
3242 }
3243 }
3244 }
3245
3246 local_got = elf_local_got_refcounts (ibfd);
3247 if (!local_got)
3248 continue;
3249
3250 symtab_hdr = &elf_symtab_hdr (ibfd);
3251 locsymcount = symtab_hdr->sh_info;
3252 end_local_got = local_got + locsymcount;
3253 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3254 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3255 s = htab->elf.sgot;
3256 srel = htab->elf.srelgot;
3257 for (; local_got < end_local_got;
3258 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3259 {
3260 *local_tlsdesc_gotent = (bfd_vma) -1;
3261 if (*local_got > 0)
3262 {
3263 if (GOT_TLS_GDESC_P (*local_tls_type))
3264 {
3265 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3266 - elf_x86_64_compute_jump_table_size (htab);
3267 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3268 *local_got = (bfd_vma) -2;
3269 }
3270 if (! GOT_TLS_GDESC_P (*local_tls_type)
3271 || GOT_TLS_GD_P (*local_tls_type))
3272 {
3273 *local_got = s->size;
3274 s->size += GOT_ENTRY_SIZE;
3275 if (GOT_TLS_GD_P (*local_tls_type))
3276 s->size += GOT_ENTRY_SIZE;
3277 }
3278 if (info->shared
3279 || GOT_TLS_GD_ANY_P (*local_tls_type)
3280 || *local_tls_type == GOT_TLS_IE)
3281 {
3282 if (GOT_TLS_GDESC_P (*local_tls_type))
3283 {
3284 htab->elf.srelplt->size
3285 += bed->s->sizeof_rela;
3286 htab->tlsdesc_plt = (bfd_vma) -1;
3287 }
3288 if (! GOT_TLS_GDESC_P (*local_tls_type)
3289 || GOT_TLS_GD_P (*local_tls_type))
3290 srel->size += bed->s->sizeof_rela;
3291 }
3292 }
3293 else
3294 *local_got = (bfd_vma) -1;
3295 }
3296 }
3297
3298 if (htab->tls_ld_got.refcount > 0)
3299 {
3300 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3301 relocs. */
3302 htab->tls_ld_got.offset = htab->elf.sgot->size;
3303 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3304 htab->elf.srelgot->size += bed->s->sizeof_rela;
3305 }
3306 else
3307 htab->tls_ld_got.offset = -1;
3308
3309 /* Allocate global sym .plt and .got entries, and space for global
3310 sym dynamic relocs. */
3311 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3312 info);
3313
3314 /* Allocate .plt and .got entries, and space for local symbols. */
3315 htab_traverse (htab->loc_hash_table,
3316 elf_x86_64_allocate_local_dynrelocs,
3317 info);
3318
3319 /* For every jump slot reserved in the sgotplt, reloc_count is
3320 incremented. However, when we reserve space for TLS descriptors,
3321 it's not incremented, so in order to compute the space reserved
3322 for them, it suffices to multiply the reloc count by the jump
3323 slot size.
3324
3325 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3326 so that R_X86_64_IRELATIVE entries come last. */
3327 if (htab->elf.srelplt)
3328 {
3329 htab->sgotplt_jump_table_size
3330 = elf_x86_64_compute_jump_table_size (htab);
3331 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3332 }
3333 else if (htab->elf.irelplt)
3334 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3335
3336 if (htab->tlsdesc_plt)
3337 {
3338 /* If we're not using lazy TLS relocations, don't generate the
3339 PLT and GOT entries they require. */
3340 if ((info->flags & DF_BIND_NOW))
3341 htab->tlsdesc_plt = 0;
3342 else
3343 {
3344 htab->tlsdesc_got = htab->elf.sgot->size;
3345 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3346 /* Reserve room for the initial entry.
3347 FIXME: we could probably do away with it in this case. */
3348 if (htab->elf.splt->size == 0)
3349 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3350 htab->tlsdesc_plt = htab->elf.splt->size;
3351 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3352 }
3353 }
3354
3355 if (htab->elf.sgotplt)
3356 {
3357 /* Don't allocate the .got.plt section if there are no GOT or PLT
3358 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3359 if ((htab->elf.hgot == NULL
3360 || !htab->elf.hgot->ref_regular_nonweak)
3361 && (htab->elf.sgotplt->size
3362 == get_elf_backend_data (output_bfd)->got_header_size)
3363 && (htab->elf.splt == NULL
3364 || htab->elf.splt->size == 0)
3365 && (htab->elf.sgot == NULL
3366 || htab->elf.sgot->size == 0)
3367 && (htab->elf.iplt == NULL
3368 || htab->elf.iplt->size == 0)
3369 && (htab->elf.igotplt == NULL
3370 || htab->elf.igotplt->size == 0))
3371 htab->elf.sgotplt->size = 0;
3372 }
3373
3374 if (htab->plt_eh_frame != NULL
3375 && htab->elf.splt != NULL
3376 && htab->elf.splt->size != 0
3377 && !bfd_is_abs_section (htab->elf.splt->output_section)
3378 && _bfd_elf_eh_frame_present (info))
3379 {
3380 const struct elf_x86_64_backend_data *arch_data
3381 = get_elf_x86_64_arch_data (bed);
3382 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3383 }
3384
3385 /* We now have determined the sizes of the various dynamic sections.
3386 Allocate memory for them. */
3387 relocs = FALSE;
3388 for (s = dynobj->sections; s != NULL; s = s->next)
3389 {
3390 if ((s->flags & SEC_LINKER_CREATED) == 0)
3391 continue;
3392
3393 if (s == htab->elf.splt
3394 || s == htab->elf.sgot
3395 || s == htab->elf.sgotplt
3396 || s == htab->elf.iplt
3397 || s == htab->elf.igotplt
3398 || s == htab->plt_bnd
3399 || s == htab->plt_got
3400 || s == htab->plt_eh_frame
3401 || s == htab->sdynbss)
3402 {
3403 /* Strip this section if we don't need it; see the
3404 comment below. */
3405 }
3406 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3407 {
3408 if (s->size != 0 && s != htab->elf.srelplt)
3409 relocs = TRUE;
3410
3411 /* We use the reloc_count field as a counter if we need
3412 to copy relocs into the output file. */
3413 if (s != htab->elf.srelplt)
3414 s->reloc_count = 0;
3415 }
3416 else
3417 {
3418 /* It's not one of our sections, so don't allocate space. */
3419 continue;
3420 }
3421
3422 if (s->size == 0)
3423 {
3424 /* If we don't need this section, strip it from the
3425 output file. This is mostly to handle .rela.bss and
3426 .rela.plt. We must create both sections in
3427 create_dynamic_sections, because they must be created
3428 before the linker maps input sections to output
3429 sections. The linker does that before
3430 adjust_dynamic_symbol is called, and it is that
3431 function which decides whether anything needs to go
3432 into these sections. */
3433
3434 s->flags |= SEC_EXCLUDE;
3435 continue;
3436 }
3437
3438 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3439 continue;
3440
3441 /* Allocate memory for the section contents. We use bfd_zalloc
3442 here in case unused entries are not reclaimed before the
3443 section's contents are written out. This should not happen,
3444 but this way if it does, we get a R_X86_64_NONE reloc instead
3445 of garbage. */
3446 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3447 if (s->contents == NULL)
3448 return FALSE;
3449 }
3450
3451 if (htab->plt_eh_frame != NULL
3452 && htab->plt_eh_frame->contents != NULL)
3453 {
3454 const struct elf_x86_64_backend_data *arch_data
3455 = get_elf_x86_64_arch_data (bed);
3456
3457 memcpy (htab->plt_eh_frame->contents,
3458 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3459 bfd_put_32 (dynobj, htab->elf.splt->size,
3460 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3461 }
3462
3463 if (htab->elf.dynamic_sections_created)
3464 {
3465 /* Add some entries to the .dynamic section. We fill in the
3466 values later, in elf_x86_64_finish_dynamic_sections, but we
3467 must add the entries now so that we get the correct size for
3468 the .dynamic section. The DT_DEBUG entry is filled in by the
3469 dynamic linker and used by the debugger. */
3470 #define add_dynamic_entry(TAG, VAL) \
3471 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3472
3473 if (info->executable)
3474 {
3475 if (!add_dynamic_entry (DT_DEBUG, 0))
3476 return FALSE;
3477 }
3478
3479 if (htab->elf.splt->size != 0)
3480 {
3481 /* DT_PLTGOT is used by prelink even if there is no PLT
3482 relocation. */
3483 if (!add_dynamic_entry (DT_PLTGOT, 0))
3484 return FALSE;
3485
3486 if (htab->elf.srelplt->size != 0)
3487 {
3488 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3489 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3490 || !add_dynamic_entry (DT_JMPREL, 0))
3491 return FALSE;
3492 }
3493
3494 if (htab->tlsdesc_plt
3495 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3496 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3497 return FALSE;
3498 }
3499
3500 if (relocs)
3501 {
3502 if (!add_dynamic_entry (DT_RELA, 0)
3503 || !add_dynamic_entry (DT_RELASZ, 0)
3504 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3505 return FALSE;
3506
3507 /* If any dynamic relocs apply to a read-only section,
3508 then we need a DT_TEXTREL entry. */
3509 if ((info->flags & DF_TEXTREL) == 0)
3510 elf_link_hash_traverse (&htab->elf,
3511 elf_x86_64_readonly_dynrelocs,
3512 info);
3513
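/* Dynamic IFUNC relocations in a read-only segment are rejected
   outright rather than falling back to DT_TEXTREL.  */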
3514 if ((info->flags & DF_TEXTREL) != 0)
3515 {
3516 if ((elf_tdata (output_bfd)->has_gnu_symbols
3517 & elf_gnu_symbol_ifunc) == elf_gnu_symbol_ifunc)
3518 {
3519 info->callbacks->einfo
3520 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3521 bfd_set_error (bfd_error_bad_value);
3522 return FALSE;
3523 }
3524
3525 if (!add_dynamic_entry (DT_TEXTREL, 0))
3526 return FALSE;
3527 }
3528 }
3529 }
3530 #undef add_dynamic_entry
3531
3532 return TRUE;
3533 }
3534
3535 static bfd_boolean
3536 elf_x86_64_always_size_sections (bfd *output_bfd,
3537 struct bfd_link_info *info)
3538 {
3539 asection *tls_sec = elf_hash_table (info)->tls_sec;
3540
3541 if (tls_sec)
3542 {
3543 struct elf_link_hash_entry *tlsbase;
3544
3545 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3546 "_TLS_MODULE_BASE_",
3547 FALSE, FALSE, FALSE);
3548
3549 if (tlsbase && tlsbase->type == STT_TLS)
3550 {
3551 struct elf_x86_64_link_hash_table *htab;
3552 struct bfd_link_hash_entry *bh = NULL;
3553 const struct elf_backend_data *bed
3554 = get_elf_backend_data (output_bfd);
3555
3556 htab = elf_x86_64_hash_table (info);
3557 if (htab == NULL)
3558 return FALSE;
3559
3560 if (!(_bfd_generic_link_add_one_symbol
3561 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3562 tls_sec, 0, NULL, FALSE,
3563 bed->collect, &bh)))
3564 return FALSE;
3565
3566 htab->tls_module_base = bh;
3567
3568 tlsbase = (struct elf_link_hash_entry *)bh;
3569 tlsbase->def_regular = 1;
3570 tlsbase->other = STV_HIDDEN;
3571 tlsbase->root.linker_def = 1;
3572 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3573 }
3574 }
3575
3576 return TRUE;
3577 }
3578
3579 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3580 executables. Rather than setting it to the beginning of the TLS
3581 section, we have to set it to the end. This function may be called
3582 multiple times; it is idempotent. */
3583
3584 static void
3585 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3586 {
3587 struct elf_x86_64_link_hash_table *htab;
3588 struct bfd_link_hash_entry *base;
3589
3590 if (!info->executable)
3591 return;
3592
3593 htab = elf_x86_64_hash_table (info);
3594 if (htab == NULL)
3595 return;
3596
3597 base = htab->tls_module_base;
3598 if (base == NULL)
3599 return;
3600
3601 base->u.def.value = htab->elf.tls_size;
3602 }
3603
3604 /* Return the base VMA address which should be subtracted from real addresses
3605 when resolving @dtpoff relocation.
3606 This is PT_TLS segment p_vaddr. */
3607
3608 static bfd_vma
3609 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3610 {
3611 /* If tls_sec is NULL, we should have signalled an error already. */
3612 if (elf_hash_table (info)->tls_sec == NULL)
3613 return 0;
3614 return elf_hash_table (info)->tls_sec->vma;
3615 }
3616
3617 /* Return the relocation value for @tpoff relocation
3618 if STT_TLS virtual address is ADDRESS. */
3619
3620 static bfd_vma
3621 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3622 {
3623 struct elf_link_hash_table *htab = elf_hash_table (info);
3624 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3625 bfd_vma static_tls_size;
3626
3627 /* If tls_segment is NULL, we should have signalled an error already. */
3628 if (htab->tls_sec == NULL)
3629 return 0;
3630
3631 /* Consider special static TLS alignment requirements. */
3632 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
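/* On x86-64 the thread pointer sits just past the static TLS
   block, so the @tpoff values computed here are negative
   offsets from it.  */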
3633 return address - static_tls_size - htab->tls_sec->vma;
3634 }
3635
3636 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3637 branch? */
3638
3639 static bfd_boolean
3640 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3641 {
3642 /* Opcode Instruction
3643 0xe8 call
3644 0xe9 jump
3645 0x0f 0x8x conditional jump */
3646 return ((offset > 0
3647 && (contents [offset - 1] == 0xe8
3648 || contents [offset - 1] == 0xe9))
3649 || (offset > 1
3650 && contents [offset - 2] == 0x0f
3651 && (contents [offset - 1] & 0xf0) == 0x80));
3652 }
3653
3654 /* Relocate an x86_64 ELF section. */
3655
3656 static bfd_boolean
3657 elf_x86_64_relocate_section (bfd *output_bfd,
3658 struct bfd_link_info *info,
3659 bfd *input_bfd,
3660 asection *input_section,
3661 bfd_byte *contents,
3662 Elf_Internal_Rela *relocs,
3663 Elf_Internal_Sym *local_syms,
3664 asection **local_sections)
3665 {
3666 struct elf_x86_64_link_hash_table *htab;
3667 Elf_Internal_Shdr *symtab_hdr;
3668 struct elf_link_hash_entry **sym_hashes;
3669 bfd_vma *local_got_offsets;
3670 bfd_vma *local_tlsdesc_gotents;
3671 Elf_Internal_Rela *rel;
3672 Elf_Internal_Rela *relend;
3673 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3674
3675 BFD_ASSERT (is_x86_64_elf (input_bfd));
3676
3677 htab = elf_x86_64_hash_table (info);
3678 if (htab == NULL)
3679 return FALSE;
3680 symtab_hdr = &elf_symtab_hdr (input_bfd);
3681 sym_hashes = elf_sym_hashes (input_bfd);
3682 local_got_offsets = elf_local_got_offsets (input_bfd);
3683 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3684
3685 elf_x86_64_set_tls_module_base (info);
3686
3687 rel = relocs;
3688 relend = relocs + input_section->reloc_count;
3689 for (; rel < relend; rel++)
3690 {
3691 unsigned int r_type;
3692 reloc_howto_type *howto;
3693 unsigned long r_symndx;
3694 struct elf_link_hash_entry *h;
3695 struct elf_x86_64_link_hash_entry *eh;
3696 Elf_Internal_Sym *sym;
3697 asection *sec;
3698 bfd_vma off, offplt, plt_offset;
3699 bfd_vma relocation;
3700 bfd_boolean unresolved_reloc;
3701 bfd_reloc_status_type r;
3702 int tls_type;
3703 asection *base_got, *resolved_plt;
3704 bfd_vma st_size;
3705
3706 r_type = ELF32_R_TYPE (rel->r_info);
3707 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3708 || r_type == (int) R_X86_64_GNU_VTENTRY)
3709 continue;
3710
3711 if (r_type >= (int) R_X86_64_standard)
3712 {
3713 (*_bfd_error_handler)
3714 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3715 input_bfd, input_section, r_type);
3716 bfd_set_error (bfd_error_bad_value);
3717 return FALSE;
3718 }
3719
3720 if (r_type != (int) R_X86_64_32
3721 || ABI_64_P (output_bfd))
3722 howto = x86_64_elf_howto_table + r_type;
3723 else
3724 howto = (x86_64_elf_howto_table
3725 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3726 r_symndx = htab->r_sym (rel->r_info);
3727 h = NULL;
3728 sym = NULL;
3729 sec = NULL;
3730 unresolved_reloc = FALSE;
3731 if (r_symndx < symtab_hdr->sh_info)
3732 {
3733 sym = local_syms + r_symndx;
3734 sec = local_sections[r_symndx];
3735
3736 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3737 &sec, rel);
3738 st_size = sym->st_size;
3739
3740 /* Relocate against local STT_GNU_IFUNC symbol. */
3741 if (!info->relocatable
3742 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3743 {
3744 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3745 rel, FALSE);
3746 if (h == NULL)
3747 abort ();
3748
3749 /* Set STT_GNU_IFUNC symbol value. */
3750 h->root.u.def.value = sym->st_value;
3751 h->root.u.def.section = sec;
3752 }
3753 }
3754 else
3755 {
3756 bfd_boolean warned ATTRIBUTE_UNUSED;
3757 bfd_boolean ignored ATTRIBUTE_UNUSED;
3758
3759 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3760 r_symndx, symtab_hdr, sym_hashes,
3761 h, sec, relocation,
3762 unresolved_reloc, warned, ignored);
3763 st_size = h->size;
3764 }
3765
3766 if (sec != NULL && discarded_section (sec))
3767 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3768 rel, 1, relend, howto, 0, contents);
3769
3770 if (info->relocatable)
3771 continue;
3772
3773 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3774 {
3775 if (r_type == R_X86_64_64)
3776 {
3777 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3778 zero-extend it to 64bit if addend is zero. */
3779 r_type = R_X86_64_32;
3780 memset (contents + rel->r_offset + 4, 0, 4);
3781 }
3782 else if (r_type == R_X86_64_SIZE64)
3783 {
3784 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3785 zero-extend it to 64bit if addend is zero. */
3786 r_type = R_X86_64_SIZE32;
3787 memset (contents + rel->r_offset + 4, 0, 4);
3788 }
3789 }
3790
3791 eh = (struct elf_x86_64_link_hash_entry *) h;
3792
3793 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3794 it here if it is defined in a non-shared object. */
3795 if (h != NULL
3796 && h->type == STT_GNU_IFUNC
3797 && h->def_regular)
3798 {
3799 bfd_vma plt_index;
3800 const char *name;
3801
3802 if ((input_section->flags & SEC_ALLOC) == 0)
3803 {
3804 /* Dynamic relocs are not propagated for SEC_DEBUGGING
3805 sections because such sections are not SEC_ALLOC and
3806 thus ld.so will not process them. */
3807 if ((input_section->flags & SEC_DEBUGGING) != 0)
3808 continue;
3809 abort ();
3810 }
3811 else if (h->plt.offset == (bfd_vma) -1)
3812 abort ();
3813
3814 /* STT_GNU_IFUNC symbol must go through PLT. */
3815 if (htab->elf.splt != NULL)
3816 {
3817 if (htab->plt_bnd != NULL)
3818 {
3819 resolved_plt = htab->plt_bnd;
3820 plt_offset = eh->plt_bnd.offset;
3821 }
3822 else
3823 {
3824 resolved_plt = htab->elf.splt;
3825 plt_offset = h->plt.offset;
3826 }
3827 }
3828 else
3829 {
3830 resolved_plt = htab->elf.iplt;
3831 plt_offset = h->plt.offset;
3832 }
3833
3834 relocation = (resolved_plt->output_section->vma
3835 + resolved_plt->output_offset + plt_offset);
3836
3837 switch (r_type)
3838 {
3839 default:
3840 if (h->root.root.string)
3841 name = h->root.root.string;
3842 else
3843 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3844 NULL);
3845 (*_bfd_error_handler)
3846 (_("%B: relocation %s against STT_GNU_IFUNC "
3847 "symbol `%s' isn't handled by %s"), input_bfd,
3848 x86_64_elf_howto_table[r_type].name,
3849 name, __FUNCTION__);
3850 bfd_set_error (bfd_error_bad_value);
3851 return FALSE;
3852
3853 case R_X86_64_32S:
3854 if (info->shared)
3855 abort ();
3856 goto do_relocation;
3857
3858 case R_X86_64_32:
3859 if (ABI_64_P (output_bfd))
3860 goto do_relocation;
3861 /* FALLTHROUGH */
3862 case R_X86_64_64:
3863 if (rel->r_addend != 0)
3864 {
3865 if (h->root.root.string)
3866 name = h->root.root.string;
3867 else
3868 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3869 sym, NULL);
3870 (*_bfd_error_handler)
3871 (_("%B: relocation %s against STT_GNU_IFUNC "
3872 "symbol `%s' has non-zero addend: %d"),
3873 input_bfd, x86_64_elf_howto_table[r_type].name,
3874 name, rel->r_addend);
3875 bfd_set_error (bfd_error_bad_value);
3876 return FALSE;
3877 }
3878
3879 	      /* Generate a dynamic relocation only when there is a
3880 		 non-GOT reference in a shared object.  */
3881 if (info->shared && h->non_got_ref)
3882 {
3883 Elf_Internal_Rela outrel;
3884 asection *sreloc;
3885
3886 /* Need a dynamic relocation to get the real function
3887 address. */
3888 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3889 info,
3890 input_section,
3891 rel->r_offset);
3892 if (outrel.r_offset == (bfd_vma) -1
3893 || outrel.r_offset == (bfd_vma) -2)
3894 abort ();
3895
3896 outrel.r_offset += (input_section->output_section->vma
3897 + input_section->output_offset);
3898
3899 if (h->dynindx == -1
3900 || h->forced_local
3901 || info->executable)
3902 {
3903 /* This symbol is resolved locally. */
3904 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3905 outrel.r_addend = (h->root.u.def.value
3906 + h->root.u.def.section->output_section->vma
3907 + h->root.u.def.section->output_offset);
3908 }
3909 else
3910 {
3911 outrel.r_info = htab->r_info (h->dynindx, r_type);
3912 outrel.r_addend = 0;
3913 }
3914
3915 sreloc = htab->elf.irelifunc;
3916 elf_append_rela (output_bfd, sreloc, &outrel);
3917
3918 /* If this reloc is against an external symbol, we
3919 do not want to fiddle with the addend. Otherwise,
3920 we need to include the symbol value so that it
3921 becomes an addend for the dynamic reloc. For an
3922 		     internal symbol, the addend has already been updated.  */
3923 continue;
3924 }
3925 /* FALLTHROUGH */
3926 case R_X86_64_PC32:
3927 case R_X86_64_PC32_BND:
3928 case R_X86_64_PC64:
3929 case R_X86_64_PLT32:
3930 case R_X86_64_PLT32_BND:
3931 goto do_relocation;
3932
3933 case R_X86_64_GOTPCREL:
3934 case R_X86_64_GOTPCREL64:
3935 base_got = htab->elf.sgot;
3936 off = h->got.offset;
3937
3938 if (base_got == NULL)
3939 abort ();
3940
3941 if (off == (bfd_vma) -1)
3942 {
3943 /* We can't use h->got.offset here to save state, or
3944 even just remember the offset, as finish_dynamic_symbol
3945 would use that as offset into .got. */
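	      /* Sketch of the slot arithmetic below (hypothetical sizes,
		 assuming a 16-byte PLT entry and 8-byte GOT_ENTRY_SIZE):
		 with h->plt.offset == 32, plt_index is 32 / 16 - 1 == 1
		 because the first .plt entry is reserved, and the matching
		 slot starts (1 + 3) * 8 == 32 bytes into .got.plt, past
		 the three entries reserved for the dynamic linker.  In the
		 .iplt case nothing is reserved, so the slot is simply
		 plt_index * GOT_ENTRY_SIZE into .igot.plt.  */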
3946
3947 if (htab->elf.splt != NULL)
3948 {
3949 plt_index = h->plt.offset / plt_entry_size - 1;
3950 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3951 base_got = htab->elf.sgotplt;
3952 }
3953 else
3954 {
3955 plt_index = h->plt.offset / plt_entry_size;
3956 off = plt_index * GOT_ENTRY_SIZE;
3957 base_got = htab->elf.igotplt;
3958 }
3959
3960 if (h->dynindx == -1
3961 || h->forced_local
3962 || info->symbolic)
3963 {
3964 		  /* This references the local definition.  We must
3965 initialize this entry in the global offset table.
3966 Since the offset must always be a multiple of 8,
3967 we use the least significant bit to record
3968 whether we have initialized it already.
3969
3970 When doing a dynamic link, we create a .rela.got
3971 relocation entry to initialize the value. This
3972 is done in the finish_dynamic_symbol routine. */
3973 if ((off & 1) != 0)
3974 off &= ~1;
3975 else
3976 {
3977 bfd_put_64 (output_bfd, relocation,
3978 base_got->contents + off);
3979 /* Note that this is harmless for the GOTPLT64
3980 case, as -1 | 1 still is -1. */
3981 h->got.offset |= 1;
3982 }
3983 }
3984 }
3985
3986 relocation = (base_got->output_section->vma
3987 + base_got->output_offset + off);
3988
3989 goto do_relocation;
3990 }
3991 }
3992
3993 /* When generating a shared object, the relocations handled here are
3994 copied into the output file to be resolved at run time. */
3995 switch (r_type)
3996 {
3997 case R_X86_64_GOT32:
3998 case R_X86_64_GOT64:
3999 /* Relocation is to the entry for this symbol in the global
4000 offset table. */
4001 case R_X86_64_GOTPCREL:
4002 case R_X86_64_GOTPCREL64:
4003 /* Use global offset table entry as symbol value. */
4004 case R_X86_64_GOTPLT64:
4005 	  /* This is obsolete and treated the same as GOT64.  */
4006 base_got = htab->elf.sgot;
4007
4008 if (htab->elf.sgot == NULL)
4009 abort ();
4010
4011 if (h != NULL)
4012 {
4013 bfd_boolean dyn;
4014
4015 off = h->got.offset;
4016 if (h->needs_plt
4017 && h->plt.offset != (bfd_vma)-1
4018 && off == (bfd_vma)-1)
4019 {
4020 /* We can't use h->got.offset here to save
4021 state, or even just remember the offset, as
4022 finish_dynamic_symbol would use that as offset into
4023 .got. */
4024 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4025 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4026 base_got = htab->elf.sgotplt;
4027 }
4028
4029 dyn = htab->elf.dynamic_sections_created;
4030
4031 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4032 || (info->shared
4033 && SYMBOL_REFERENCES_LOCAL (info, h))
4034 || (ELF_ST_VISIBILITY (h->other)
4035 && h->root.type == bfd_link_hash_undefweak))
4036 {
4037 /* This is actually a static link, or it is a -Bsymbolic
4038 link and the symbol is defined locally, or the symbol
4039 was forced to be local because of a version file. We
4040 must initialize this entry in the global offset table.
4041 Since the offset must always be a multiple of 8, we
4042 use the least significant bit to record whether we
4043 have initialized it already.
4044
4045 When doing a dynamic link, we create a .rela.got
4046 relocation entry to initialize the value. This is
4047 done in the finish_dynamic_symbol routine. */
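	      /* For example: because GOT offsets are multiples of 8,
		 bit 0 is never part of a real offset, so an entry at
		 offset 0x18 is recorded as 0x19 once it has been written,
		 and the off &= ~1 below recovers the real offset.  */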
4048 if ((off & 1) != 0)
4049 off &= ~1;
4050 else
4051 {
4052 bfd_put_64 (output_bfd, relocation,
4053 base_got->contents + off);
4054 /* Note that this is harmless for the GOTPLT64 case,
4055 as -1 | 1 still is -1. */
4056 h->got.offset |= 1;
4057 }
4058 }
4059 else
4060 unresolved_reloc = FALSE;
4061 }
4062 else
4063 {
4064 if (local_got_offsets == NULL)
4065 abort ();
4066
4067 off = local_got_offsets[r_symndx];
4068
4069 /* The offset must always be a multiple of 8. We use
4070 the least significant bit to record whether we have
4071 already generated the necessary reloc. */
4072 if ((off & 1) != 0)
4073 off &= ~1;
4074 else
4075 {
4076 bfd_put_64 (output_bfd, relocation,
4077 base_got->contents + off);
4078
4079 if (info->shared)
4080 {
4081 asection *s;
4082 Elf_Internal_Rela outrel;
4083
4084 /* We need to generate a R_X86_64_RELATIVE reloc
4085 for the dynamic linker. */
4086 s = htab->elf.srelgot;
4087 if (s == NULL)
4088 abort ();
4089
4090 outrel.r_offset = (base_got->output_section->vma
4091 + base_got->output_offset
4092 + off);
4093 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4094 outrel.r_addend = relocation;
4095 elf_append_rela (output_bfd, s, &outrel);
4096 }
4097
4098 local_got_offsets[r_symndx] |= 1;
4099 }
4100 }
4101
4102 if (off >= (bfd_vma) -2)
4103 abort ();
4104
4105 relocation = base_got->output_section->vma
4106 + base_got->output_offset + off;
4107 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
4108 relocation -= htab->elf.sgotplt->output_section->vma
4109 - htab->elf.sgotplt->output_offset;
4110
4111 break;
4112
4113 case R_X86_64_GOTOFF64:
4114 /* Relocation is relative to the start of the global offset
4115 table. */
4116
4117 	  /* Check to make sure it isn't a protected function or data
4118 	     symbol for a shared library, since it may not be local when
4119 	     used as a function address or with a copy relocation.  We also
4120 	     need to make sure that the symbol is referenced locally.  */
4121 if (info->shared && h)
4122 {
4123 if (!h->def_regular)
4124 {
4125 const char *v;
4126
4127 switch (ELF_ST_VISIBILITY (h->other))
4128 {
4129 case STV_HIDDEN:
4130 v = _("hidden symbol");
4131 break;
4132 case STV_INTERNAL:
4133 v = _("internal symbol");
4134 break;
4135 case STV_PROTECTED:
4136 v = _("protected symbol");
4137 break;
4138 default:
4139 v = _("symbol");
4140 break;
4141 }
4142
4143 (*_bfd_error_handler)
4144 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4145 input_bfd, v, h->root.root.string);
4146 bfd_set_error (bfd_error_bad_value);
4147 return FALSE;
4148 }
4149 else if (!info->executable
4150 && !SYMBOL_REFERENCES_LOCAL (info, h)
4151 && (h->type == STT_FUNC
4152 || h->type == STT_OBJECT)
4153 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4154 {
4155 (*_bfd_error_handler)
4156 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4157 input_bfd,
4158 h->type == STT_FUNC ? "function" : "data",
4159 h->root.root.string);
4160 bfd_set_error (bfd_error_bad_value);
4161 return FALSE;
4162 }
4163 }
4164
4165 /* Note that sgot is not involved in this
4166 calculation. We always want the start of .got.plt. If we
4167 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4168 permitted by the ABI, we might have to change this
4169 calculation. */
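	  /* In other words, the value that ends up in the field is
	     S + A minus the address of .got.plt, matching the
	     R_X86_64_GOTOFF64 definition of symbol minus GOT base.  */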
4170 relocation -= htab->elf.sgotplt->output_section->vma
4171 + htab->elf.sgotplt->output_offset;
4172 break;
4173
4174 case R_X86_64_GOTPC32:
4175 case R_X86_64_GOTPC64:
4176 /* Use global offset table as symbol value. */
4177 relocation = htab->elf.sgotplt->output_section->vma
4178 + htab->elf.sgotplt->output_offset;
4179 unresolved_reloc = FALSE;
4180 break;
4181
4182 case R_X86_64_PLTOFF64:
4183 /* Relocation is PLT entry relative to GOT. For local
4184 symbols it's the symbol itself relative to GOT. */
4185 if (h != NULL
4186 /* See PLT32 handling. */
4187 && h->plt.offset != (bfd_vma) -1
4188 && htab->elf.splt != NULL)
4189 {
4190 if (htab->plt_bnd != NULL)
4191 {
4192 resolved_plt = htab->plt_bnd;
4193 plt_offset = eh->plt_bnd.offset;
4194 }
4195 else
4196 {
4197 resolved_plt = htab->elf.splt;
4198 plt_offset = h->plt.offset;
4199 }
4200
4201 relocation = (resolved_plt->output_section->vma
4202 + resolved_plt->output_offset
4203 + plt_offset);
4204 unresolved_reloc = FALSE;
4205 }
4206
4207 relocation -= htab->elf.sgotplt->output_section->vma
4208 + htab->elf.sgotplt->output_offset;
4209 break;
4210
4211 case R_X86_64_PLT32:
4212 case R_X86_64_PLT32_BND:
4213 /* Relocation is to the entry for this symbol in the
4214 procedure linkage table. */
4215
4216 /* Resolve a PLT32 reloc against a local symbol directly,
4217 without using the procedure linkage table. */
4218 if (h == NULL)
4219 break;
4220
4221 if ((h->plt.offset == (bfd_vma) -1
4222 && eh->plt_got.offset == (bfd_vma) -1)
4223 || htab->elf.splt == NULL)
4224 {
4225 /* We didn't make a PLT entry for this symbol. This
4226 happens when statically linking PIC code, or when
4227 using -Bsymbolic. */
4228 break;
4229 }
4230
4231 if (h->plt.offset != (bfd_vma) -1)
4232 {
4233 if (htab->plt_bnd != NULL)
4234 {
4235 resolved_plt = htab->plt_bnd;
4236 plt_offset = eh->plt_bnd.offset;
4237 }
4238 else
4239 {
4240 resolved_plt = htab->elf.splt;
4241 plt_offset = h->plt.offset;
4242 }
4243 }
4244 else
4245 {
4246 /* Use the GOT PLT. */
4247 resolved_plt = htab->plt_got;
4248 plt_offset = eh->plt_got.offset;
4249 }
4250
4251 relocation = (resolved_plt->output_section->vma
4252 + resolved_plt->output_offset
4253 + plt_offset);
4254 unresolved_reloc = FALSE;
4255 break;
4256
4257 case R_X86_64_SIZE32:
4258 case R_X86_64_SIZE64:
4259 /* Set to symbol size. */
4260 relocation = st_size;
4261 goto direct;
4262
4263 case R_X86_64_PC8:
4264 case R_X86_64_PC16:
4265 case R_X86_64_PC32:
4266 case R_X86_64_PC32_BND:
4267 /* Don't complain about -fPIC if the symbol is undefined when
4268 	     building an executable.  */
4269 if (info->shared
4270 && (input_section->flags & SEC_ALLOC) != 0
4271 && (input_section->flags & SEC_READONLY) != 0
4272 && h != NULL
4273 && !(info->executable
4274 && h->root.type == bfd_link_hash_undefined))
4275 {
4276 bfd_boolean fail = FALSE;
4277 bfd_boolean branch
4278 = ((r_type == R_X86_64_PC32
4279 || r_type == R_X86_64_PC32_BND)
4280 && is_32bit_relative_branch (contents, rel->r_offset));
4281
4282 if (SYMBOL_REFERENCES_LOCAL (info, h))
4283 {
4284 /* Symbol is referenced locally. Make sure it is
4285 		 defined locally, or that this is a branch.  */
4286 fail = !h->def_regular && !branch;
4287 }
4288 else if (!(info->executable
4289 && (h->needs_copy || eh->needs_copy)))
4290 {
4291 	      /* Symbol doesn't need a copy reloc and isn't referenced
4292 		 locally.  We only allow a branch to a symbol with
4293 		 non-default visibility.  */
4294 fail = (!branch
4295 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4296 }
4297
4298 if (fail)
4299 {
4300 const char *fmt;
4301 const char *v;
4302 const char *pic = "";
4303
4304 switch (ELF_ST_VISIBILITY (h->other))
4305 {
4306 case STV_HIDDEN:
4307 v = _("hidden symbol");
4308 break;
4309 case STV_INTERNAL:
4310 v = _("internal symbol");
4311 break;
4312 case STV_PROTECTED:
4313 v = _("protected symbol");
4314 break;
4315 default:
4316 v = _("symbol");
4317 pic = _("; recompile with -fPIC");
4318 break;
4319 }
4320
4321 if (h->def_regular)
4322 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4323 else
4324 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4325
4326 (*_bfd_error_handler) (fmt, input_bfd,
4327 x86_64_elf_howto_table[r_type].name,
4328 v, h->root.root.string, pic);
4329 bfd_set_error (bfd_error_bad_value);
4330 return FALSE;
4331 }
4332 }
4333 /* Fall through. */
4334
4335 case R_X86_64_8:
4336 case R_X86_64_16:
4337 case R_X86_64_32:
4338 case R_X86_64_PC64:
4339 case R_X86_64_64:
4340 /* FIXME: The ABI says the linker should make sure the value is
4341 	 the same when it's zero-extended to 64 bits.  */
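      /* For instance, a computed value of 0x100000000 has no 32-bit
	 representation that zero-extends back to the same 64-bit
	 value.  */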
4342
4343 direct:
4344 if ((input_section->flags & SEC_ALLOC) == 0)
4345 break;
4346
4347 /* Don't copy a pc-relative relocation into the output file
4348 	 if the symbol needs a copy reloc or the symbol is undefined
4349 	 when building an executable.  */
4350 if ((info->shared
4351 && !(info->executable
4352 && h != NULL
4353 && (h->needs_copy
4354 || eh->needs_copy
4355 || h->root.type == bfd_link_hash_undefined)
4356 && IS_X86_64_PCREL_TYPE (r_type))
4357 && (h == NULL
4358 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4359 || h->root.type != bfd_link_hash_undefweak)
4360 && ((! IS_X86_64_PCREL_TYPE (r_type)
4361 && r_type != R_X86_64_SIZE32
4362 && r_type != R_X86_64_SIZE64)
4363 || ! SYMBOL_CALLS_LOCAL (info, h)))
4364 || (ELIMINATE_COPY_RELOCS
4365 && !info->shared
4366 && h != NULL
4367 && h->dynindx != -1
4368 && !h->non_got_ref
4369 && ((h->def_dynamic
4370 && !h->def_regular)
4371 || h->root.type == bfd_link_hash_undefweak
4372 || h->root.type == bfd_link_hash_undefined)))
4373 {
4374 Elf_Internal_Rela outrel;
4375 bfd_boolean skip, relocate;
4376 asection *sreloc;
4377
4378 /* When generating a shared object, these relocations
4379 are copied into the output file to be resolved at run
4380 time. */
4381 skip = FALSE;
4382 relocate = FALSE;
4383
4384 outrel.r_offset =
4385 _bfd_elf_section_offset (output_bfd, info, input_section,
4386 rel->r_offset);
4387 if (outrel.r_offset == (bfd_vma) -1)
4388 skip = TRUE;
4389 else if (outrel.r_offset == (bfd_vma) -2)
4390 skip = TRUE, relocate = TRUE;
4391
4392 outrel.r_offset += (input_section->output_section->vma
4393 + input_section->output_offset);
4394
4395 if (skip)
4396 memset (&outrel, 0, sizeof outrel);
4397
4398 /* h->dynindx may be -1 if this symbol was marked to
4399 become local. */
4400 else if (h != NULL
4401 && h->dynindx != -1
4402 && (IS_X86_64_PCREL_TYPE (r_type)
4403 || ! info->shared
4404 || ! SYMBOLIC_BIND (info, h)
4405 || ! h->def_regular))
4406 {
4407 outrel.r_info = htab->r_info (h->dynindx, r_type);
4408 outrel.r_addend = rel->r_addend;
4409 }
4410 else
4411 {
4412 /* This symbol is local, or marked to become local. */
4413 if (r_type == htab->pointer_r_type)
4414 {
4415 relocate = TRUE;
4416 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4417 outrel.r_addend = relocation + rel->r_addend;
4418 }
4419 else if (r_type == R_X86_64_64
4420 && !ABI_64_P (output_bfd))
4421 {
4422 relocate = TRUE;
4423 outrel.r_info = htab->r_info (0,
4424 R_X86_64_RELATIVE64);
4425 outrel.r_addend = relocation + rel->r_addend;
4426 /* Check addend overflow. */
4427 if ((outrel.r_addend & 0x80000000)
4428 != (rel->r_addend & 0x80000000))
4429 {
4430 const char *name;
4431 int addend = rel->r_addend;
4432 if (h && h->root.root.string)
4433 name = h->root.root.string;
4434 else
4435 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4436 sym, NULL);
4437 if (addend < 0)
4438 (*_bfd_error_handler)
4439 (_("%B: addend -0x%x in relocation %s against "
4440 "symbol `%s' at 0x%lx in section `%A' is "
4441 "out of range"),
4442 input_bfd, input_section, addend,
4443 x86_64_elf_howto_table[r_type].name,
4444 name, (unsigned long) rel->r_offset);
4445 else
4446 (*_bfd_error_handler)
4447 (_("%B: addend 0x%x in relocation %s against "
4448 "symbol `%s' at 0x%lx in section `%A' is "
4449 "out of range"),
4450 input_bfd, input_section, addend,
4451 x86_64_elf_howto_table[r_type].name,
4452 name, (unsigned long) rel->r_offset);
4453 bfd_set_error (bfd_error_bad_value);
4454 return FALSE;
4455 }
4456 }
4457 else
4458 {
4459 long sindx;
4460
4461 if (bfd_is_abs_section (sec))
4462 sindx = 0;
4463 else if (sec == NULL || sec->owner == NULL)
4464 {
4465 bfd_set_error (bfd_error_bad_value);
4466 return FALSE;
4467 }
4468 else
4469 {
4470 asection *osec;
4471
4472 /* We are turning this relocation into one
4473 against a section symbol. It would be
4474 proper to subtract the symbol's value,
4475 osec->vma, from the emitted reloc addend,
4476 but ld.so expects buggy relocs. */
4477 osec = sec->output_section;
4478 sindx = elf_section_data (osec)->dynindx;
4479 if (sindx == 0)
4480 {
4481 asection *oi = htab->elf.text_index_section;
4482 sindx = elf_section_data (oi)->dynindx;
4483 }
4484 BFD_ASSERT (sindx != 0);
4485 }
4486
4487 outrel.r_info = htab->r_info (sindx, r_type);
4488 outrel.r_addend = relocation + rel->r_addend;
4489 }
4490 }
4491
4492 sreloc = elf_section_data (input_section)->sreloc;
4493
4494 if (sreloc == NULL || sreloc->contents == NULL)
4495 {
4496 r = bfd_reloc_notsupported;
4497 goto check_relocation_error;
4498 }
4499
4500 elf_append_rela (output_bfd, sreloc, &outrel);
4501
4502 /* If this reloc is against an external symbol, we do
4503 not want to fiddle with the addend. Otherwise, we
4504 need to include the symbol value so that it becomes
4505 an addend for the dynamic reloc. */
4506 if (! relocate)
4507 continue;
4508 }
4509
4510 break;
4511
4512 case R_X86_64_TLSGD:
4513 case R_X86_64_GOTPC32_TLSDESC:
4514 case R_X86_64_TLSDESC_CALL:
4515 case R_X86_64_GOTTPOFF:
4516 tls_type = GOT_UNKNOWN;
4517 if (h == NULL && local_got_offsets)
4518 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4519 else if (h != NULL)
4520 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4521
4522 if (! elf_x86_64_tls_transition (info, input_bfd,
4523 input_section, contents,
4524 symtab_hdr, sym_hashes,
4525 &r_type, tls_type, rel,
4526 relend, h, r_symndx))
4527 return FALSE;
4528
4529 if (r_type == R_X86_64_TPOFF32)
4530 {
4531 bfd_vma roff = rel->r_offset;
4532
4533 BFD_ASSERT (! unresolved_reloc);
4534
4535 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4536 {
4537 /* GD->LE transition. For 64bit, change
4538 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4539 .word 0x6666; rex64; call __tls_get_addr
4540 into:
4541 movq %fs:0, %rax
4542 leaq foo@tpoff(%rax), %rax
4543 For 32bit, change
4544 leaq foo@tlsgd(%rip), %rdi
4545 .word 0x6666; rex64; call __tls_get_addr
4546 into:
4547 movl %fs:0, %eax
4548 leaq foo@tpoff(%rax), %rax
4549 For largepic, change:
4550 leaq foo@tlsgd(%rip), %rdi
4551 movabsq $__tls_get_addr@pltoff, %rax
4552 addq %rbx, %rax
4553 call *%rax
4554 into:
4555 movq %fs:0, %rax
4556 leaq foo@tpoff(%rax), %rax
4557 nopw 0x0(%rax,%rax,1) */
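	      /* A decode of the 64-bit replacement bytes used below, for
		 reference: 64 48 8b 04 25 imm32 is movq %fs:0x0, %rax and
		 48 8d 80 imm32 is leaq imm32(%rax), %rax; the foo@tpoff
		 value is then stored at roff + 8 (+1 in the large-PIC
		 case, which also appends a 6-byte nopw pad).  */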
4558 int largepic = 0;
4559 if (ABI_64_P (output_bfd)
4560 && contents[roff + 5] == (bfd_byte) '\xb8')
4561 {
4562 memcpy (contents + roff - 3,
4563 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4564 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4565 largepic = 1;
4566 }
4567 else if (ABI_64_P (output_bfd))
4568 memcpy (contents + roff - 4,
4569 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4570 16);
4571 else
4572 memcpy (contents + roff - 3,
4573 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4574 15);
4575 bfd_put_32 (output_bfd,
4576 elf_x86_64_tpoff (info, relocation),
4577 contents + roff + 8 + largepic);
4578 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4579 rel++;
4580 continue;
4581 }
4582 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4583 {
4584 /* GDesc -> LE transition.
4585 It's originally something like:
4586 leaq x@tlsdesc(%rip), %rax
4587
4588 Change it to:
4589 movl $x@tpoff, %rax. */
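	      /* The leaq is REX.W 8d /r with a RIP-relative ModRM: the REX
		 prefix is at roff - 3 and the ModRM byte at roff - 1.  The
		 rewrite below keeps the destination register: the old
		 REX.R bit ((type >> 2) & 1) becomes REX.B of the new
		 prefix, the opcode becomes c7 (mov imm32 to r/m64), and
		 the register field of the old ModRM ((val >> 3) & 7) moves
		 into the r/m field of the new one (0xc0 | reg).  */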
4590
4591 unsigned int val, type;
4592
4593 type = bfd_get_8 (input_bfd, contents + roff - 3);
4594 val = bfd_get_8 (input_bfd, contents + roff - 1);
4595 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4596 contents + roff - 3);
4597 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4598 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4599 contents + roff - 1);
4600 bfd_put_32 (output_bfd,
4601 elf_x86_64_tpoff (info, relocation),
4602 contents + roff);
4603 continue;
4604 }
4605 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4606 {
4607 /* GDesc -> LE transition.
4608 It's originally:
4609 call *(%rax)
4610 Turn it into:
4611 xchg %ax,%ax. */
4612 bfd_put_8 (output_bfd, 0x66, contents + roff);
4613 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4614 continue;
4615 }
4616 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4617 {
4618 /* IE->LE transition:
4619 For 64bit, originally it can be one of:
4620 movq foo@gottpoff(%rip), %reg
4621 addq foo@gottpoff(%rip), %reg
4622 We change it into:
4623 movq $foo, %reg
4624 leaq foo(%reg), %reg
4625 addq $foo, %reg.
4626 For 32bit, originally it can be one of:
4627 movq foo@gottpoff(%rip), %reg
4628 addl foo@gottpoff(%rip), %reg
4629 We change it into:
4630 movq $foo, %reg
4631 leal foo(%reg), %reg
4632 addl $foo, %reg. */
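	      /* In the bytes examined below, roff - 2 holds the opcode
		 (0x8b for mov, otherwise add), roff - 1 holds the ModRM
		 whose reg field names the destination register, and
		 roff - 3, when present, holds a REX prefix.  The REX
		 fix-ups move the register's extension bit from REX.R to
		 REX.B (or set both for the lea form) because the register
		 switches from the reg field to the r/m field.  */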
4633
4634 unsigned int val, type, reg;
4635
4636 if (roff >= 3)
4637 val = bfd_get_8 (input_bfd, contents + roff - 3);
4638 else
4639 val = 0;
4640 type = bfd_get_8 (input_bfd, contents + roff - 2);
4641 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4642 reg >>= 3;
4643 if (type == 0x8b)
4644 {
4645 /* movq */
4646 if (val == 0x4c)
4647 bfd_put_8 (output_bfd, 0x49,
4648 contents + roff - 3);
4649 else if (!ABI_64_P (output_bfd) && val == 0x44)
4650 bfd_put_8 (output_bfd, 0x41,
4651 contents + roff - 3);
4652 bfd_put_8 (output_bfd, 0xc7,
4653 contents + roff - 2);
4654 bfd_put_8 (output_bfd, 0xc0 | reg,
4655 contents + roff - 1);
4656 }
4657 else if (reg == 4)
4658 {
4659 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4660 is special */
4661 if (val == 0x4c)
4662 bfd_put_8 (output_bfd, 0x49,
4663 contents + roff - 3);
4664 else if (!ABI_64_P (output_bfd) && val == 0x44)
4665 bfd_put_8 (output_bfd, 0x41,
4666 contents + roff - 3);
4667 bfd_put_8 (output_bfd, 0x81,
4668 contents + roff - 2);
4669 bfd_put_8 (output_bfd, 0xc0 | reg,
4670 contents + roff - 1);
4671 }
4672 else
4673 {
4674 /* addq/addl -> leaq/leal */
4675 if (val == 0x4c)
4676 bfd_put_8 (output_bfd, 0x4d,
4677 contents + roff - 3);
4678 else if (!ABI_64_P (output_bfd) && val == 0x44)
4679 bfd_put_8 (output_bfd, 0x45,
4680 contents + roff - 3);
4681 bfd_put_8 (output_bfd, 0x8d,
4682 contents + roff - 2);
4683 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4684 contents + roff - 1);
4685 }
4686 bfd_put_32 (output_bfd,
4687 elf_x86_64_tpoff (info, relocation),
4688 contents + roff);
4689 continue;
4690 }
4691 else
4692 BFD_ASSERT (FALSE);
4693 }
4694
4695 if (htab->elf.sgot == NULL)
4696 abort ();
4697
4698 if (h != NULL)
4699 {
4700 off = h->got.offset;
4701 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4702 }
4703 else
4704 {
4705 if (local_got_offsets == NULL)
4706 abort ();
4707
4708 off = local_got_offsets[r_symndx];
4709 offplt = local_tlsdesc_gotents[r_symndx];
4710 }
4711
4712 if ((off & 1) != 0)
4713 off &= ~1;
4714 else
4715 {
4716 Elf_Internal_Rela outrel;
4717 int dr_type, indx;
4718 asection *sreloc;
4719
4720 if (htab->elf.srelgot == NULL)
4721 abort ();
4722
4723 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4724
4725 if (GOT_TLS_GDESC_P (tls_type))
4726 {
4727 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4728 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4729 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4730 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4731 + htab->elf.sgotplt->output_offset
4732 + offplt
4733 + htab->sgotplt_jump_table_size);
4734 sreloc = htab->elf.srelplt;
4735 if (indx == 0)
4736 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4737 else
4738 outrel.r_addend = 0;
4739 elf_append_rela (output_bfd, sreloc, &outrel);
4740 }
4741
4742 sreloc = htab->elf.srelgot;
4743
4744 outrel.r_offset = (htab->elf.sgot->output_section->vma
4745 + htab->elf.sgot->output_offset + off);
4746
4747 if (GOT_TLS_GD_P (tls_type))
4748 dr_type = R_X86_64_DTPMOD64;
4749 else if (GOT_TLS_GDESC_P (tls_type))
4750 goto dr_done;
4751 else
4752 dr_type = R_X86_64_TPOFF64;
4753
4754 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4755 outrel.r_addend = 0;
4756 if ((dr_type == R_X86_64_TPOFF64
4757 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4758 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4759 outrel.r_info = htab->r_info (indx, dr_type);
4760
4761 elf_append_rela (output_bfd, sreloc, &outrel);
4762
4763 if (GOT_TLS_GD_P (tls_type))
4764 {
4765 if (indx == 0)
4766 {
4767 BFD_ASSERT (! unresolved_reloc);
4768 bfd_put_64 (output_bfd,
4769 relocation - elf_x86_64_dtpoff_base (info),
4770 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4771 }
4772 else
4773 {
4774 bfd_put_64 (output_bfd, 0,
4775 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4776 outrel.r_info = htab->r_info (indx,
4777 R_X86_64_DTPOFF64);
4778 outrel.r_offset += GOT_ENTRY_SIZE;
4779 elf_append_rela (output_bfd, sreloc,
4780 &outrel);
4781 }
4782 }
4783
4784 dr_done:
4785 if (h != NULL)
4786 h->got.offset |= 1;
4787 else
4788 local_got_offsets[r_symndx] |= 1;
4789 }
4790
4791 if (off >= (bfd_vma) -2
4792 && ! GOT_TLS_GDESC_P (tls_type))
4793 abort ();
4794 if (r_type == ELF32_R_TYPE (rel->r_info))
4795 {
4796 if (r_type == R_X86_64_GOTPC32_TLSDESC
4797 || r_type == R_X86_64_TLSDESC_CALL)
4798 relocation = htab->elf.sgotplt->output_section->vma
4799 + htab->elf.sgotplt->output_offset
4800 + offplt + htab->sgotplt_jump_table_size;
4801 else
4802 relocation = htab->elf.sgot->output_section->vma
4803 + htab->elf.sgot->output_offset + off;
4804 unresolved_reloc = FALSE;
4805 }
4806 else
4807 {
4808 bfd_vma roff = rel->r_offset;
4809
4810 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4811 {
4812 /* GD->IE transition. For 64bit, change
4813 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4814 .word 0x6666; rex64; call __tls_get_addr@plt
4815 into:
4816 movq %fs:0, %rax
4817 addq foo@gottpoff(%rip), %rax
4818 For 32bit, change
4819 leaq foo@tlsgd(%rip), %rdi
4820 .word 0x6666; rex64; call __tls_get_addr@plt
4821 into:
4822 movl %fs:0, %eax
4823 addq foo@gottpoff(%rip), %rax
4824 For largepic, change:
4825 leaq foo@tlsgd(%rip), %rdi
4826 movabsq $__tls_get_addr@pltoff, %rax
4827 addq %rbx, %rax
4828 call *%rax
4829 into:
4830 movq %fs:0, %rax
4831 addq foo@gottpoff(%rax), %rax
4832 nopw 0x0(%rax,%rax,1) */
4833 int largepic = 0;
4834 if (ABI_64_P (output_bfd)
4835 && contents[roff + 5] == (bfd_byte) '\xb8')
4836 {
4837 memcpy (contents + roff - 3,
4838 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4839 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4840 largepic = 1;
4841 }
4842 else if (ABI_64_P (output_bfd))
4843 memcpy (contents + roff - 4,
4844 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4845 16);
4846 else
4847 memcpy (contents + roff - 3,
4848 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4849 15);
4850
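		  /* The @gottpoff displacement is written at
		     roff + 8 + largepic and is relative to the end of the
		     add instruction four bytes later, so the value below
		     is the GOT entry address minus
		     (section address + roff + largepic + 12).  */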
4851 relocation = (htab->elf.sgot->output_section->vma
4852 + htab->elf.sgot->output_offset + off
4853 - roff
4854 - largepic
4855 - input_section->output_section->vma
4856 - input_section->output_offset
4857 - 12);
4858 bfd_put_32 (output_bfd, relocation,
4859 contents + roff + 8 + largepic);
4860 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4861 rel++;
4862 continue;
4863 }
4864 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4865 {
4866 /* GDesc -> IE transition.
4867 It's originally something like:
4868 leaq x@tlsdesc(%rip), %rax
4869
4870 Change it to:
4871 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4872
4873 /* Now modify the instruction as appropriate. To
4874 turn a leaq into a movq in the form we use it, it
4875 suffices to change the second byte from 0x8d to
4876 0x8b. */
4877 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4878
4879 bfd_put_32 (output_bfd,
4880 htab->elf.sgot->output_section->vma
4881 + htab->elf.sgot->output_offset + off
4882 - rel->r_offset
4883 - input_section->output_section->vma
4884 - input_section->output_offset
4885 - 4,
4886 contents + roff);
4887 continue;
4888 }
4889 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4890 {
4891 /* GDesc -> IE transition.
4892 It's originally:
4893 call *(%rax)
4894
4895 Change it to:
4896 xchg %ax, %ax. */
4897
4898 bfd_put_8 (output_bfd, 0x66, contents + roff);
4899 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4900 continue;
4901 }
4902 else
4903 BFD_ASSERT (FALSE);
4904 }
4905 break;
4906
4907 case R_X86_64_TLSLD:
4908 if (! elf_x86_64_tls_transition (info, input_bfd,
4909 input_section, contents,
4910 symtab_hdr, sym_hashes,
4911 &r_type, GOT_UNKNOWN,
4912 rel, relend, h, r_symndx))
4913 return FALSE;
4914
4915 if (r_type != R_X86_64_TLSLD)
4916 {
4917 /* LD->LE transition:
4918 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4919 For 64bit, we change it into:
4920 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4921 For 32bit, we change it into:
4922 nopl 0x0(%rax); movl %fs:0, %eax.
4923 For largepic, change:
4924 leaq foo@tlsgd(%rip), %rdi
4925 movabsq $__tls_get_addr@pltoff, %rax
4926 addq %rbx, %rax
4927 call *%rax
4928 into:
4929 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4930 	       movq %fs:0, %rax */
4931
4932 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4933 if (ABI_64_P (output_bfd)
4934 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4935 memcpy (contents + rel->r_offset - 3,
4936 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4937 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4938 else if (ABI_64_P (output_bfd))
4939 memcpy (contents + rel->r_offset - 3,
4940 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4941 else
4942 memcpy (contents + rel->r_offset - 3,
4943 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4944 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4945 rel++;
4946 continue;
4947 }
4948
4949 if (htab->elf.sgot == NULL)
4950 abort ();
4951
4952 off = htab->tls_ld_got.offset;
4953 if (off & 1)
4954 off &= ~1;
4955 else
4956 {
4957 Elf_Internal_Rela outrel;
4958
4959 if (htab->elf.srelgot == NULL)
4960 abort ();
4961
4962 outrel.r_offset = (htab->elf.sgot->output_section->vma
4963 + htab->elf.sgot->output_offset + off);
4964
4965 bfd_put_64 (output_bfd, 0,
4966 htab->elf.sgot->contents + off);
4967 bfd_put_64 (output_bfd, 0,
4968 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4969 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4970 outrel.r_addend = 0;
4971 elf_append_rela (output_bfd, htab->elf.srelgot,
4972 &outrel);
4973 htab->tls_ld_got.offset |= 1;
4974 }
4975 relocation = htab->elf.sgot->output_section->vma
4976 + htab->elf.sgot->output_offset + off;
4977 unresolved_reloc = FALSE;
4978 break;
4979
4980 case R_X86_64_DTPOFF32:
4981 	  if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4982 relocation -= elf_x86_64_dtpoff_base (info);
4983 else
4984 relocation = elf_x86_64_tpoff (info, relocation);
4985 break;
4986
4987 case R_X86_64_TPOFF32:
4988 case R_X86_64_TPOFF64:
4989 BFD_ASSERT (info->executable);
4990 relocation = elf_x86_64_tpoff (info, relocation);
4991 break;
4992
4993 case R_X86_64_DTPOFF64:
4994 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4995 relocation -= elf_x86_64_dtpoff_base (info);
4996 break;
4997
4998 default:
4999 break;
5000 }
5001
5002 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5003 because such sections are not SEC_ALLOC and thus ld.so will
5004 not process them. */
5005 if (unresolved_reloc
5006 && !((input_section->flags & SEC_DEBUGGING) != 0
5007 && h->def_dynamic)
5008 && _bfd_elf_section_offset (output_bfd, info, input_section,
5009 rel->r_offset) != (bfd_vma) -1)
5010 {
5011 (*_bfd_error_handler)
5012 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5013 input_bfd,
5014 input_section,
5015 (long) rel->r_offset,
5016 howto->name,
5017 h->root.root.string);
5018 return FALSE;
5019 }
5020
5021 do_relocation:
5022 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5023 contents, rel->r_offset,
5024 relocation, rel->r_addend);
5025
5026 check_relocation_error:
5027 if (r != bfd_reloc_ok)
5028 {
5029 const char *name;
5030
5031 if (h != NULL)
5032 name = h->root.root.string;
5033 else
5034 {
5035 name = bfd_elf_string_from_elf_section (input_bfd,
5036 symtab_hdr->sh_link,
5037 sym->st_name);
5038 if (name == NULL)
5039 return FALSE;
5040 if (*name == '\0')
5041 name = bfd_section_name (input_bfd, sec);
5042 }
5043
5044 if (r == bfd_reloc_overflow)
5045 {
5046 if (! ((*info->callbacks->reloc_overflow)
5047 (info, (h ? &h->root : NULL), name, howto->name,
5048 (bfd_vma) 0, input_bfd, input_section,
5049 rel->r_offset)))
5050 return FALSE;
5051 }
5052 else
5053 {
5054 (*_bfd_error_handler)
5055 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5056 input_bfd, input_section,
5057 (long) rel->r_offset, name, (int) r);
5058 return FALSE;
5059 }
5060 }
5061 }
5062
5063 return TRUE;
5064 }
5065
5066 /* Finish up dynamic symbol handling. We set the contents of various
5067 dynamic sections here. */
5068
5069 static bfd_boolean
5070 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5071 struct bfd_link_info *info,
5072 struct elf_link_hash_entry *h,
5073 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
5074 {
5075 struct elf_x86_64_link_hash_table *htab;
5076 const struct elf_x86_64_backend_data *abed;
5077 bfd_boolean use_plt_bnd;
5078 struct elf_x86_64_link_hash_entry *eh;
5079
5080 htab = elf_x86_64_hash_table (info);
5081 if (htab == NULL)
5082 return FALSE;
5083
5084 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5085      section only if there is a .plt section.  */
5086 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5087 abed = (use_plt_bnd
5088 ? &elf_x86_64_bnd_arch_bed
5089 : get_elf_x86_64_backend_data (output_bfd));
5090
5091 eh = (struct elf_x86_64_link_hash_entry *) h;
5092
5093 if (h->plt.offset != (bfd_vma) -1)
5094 {
5095 bfd_vma plt_index;
5096 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5097 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5098 Elf_Internal_Rela rela;
5099 bfd_byte *loc;
5100 asection *plt, *gotplt, *relplt, *resolved_plt;
5101 const struct elf_backend_data *bed;
5102 bfd_vma plt_got_pcrel_offset;
5103
5104 /* When building a static executable, use .iplt, .igot.plt and
5105 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5106 if (htab->elf.splt != NULL)
5107 {
5108 plt = htab->elf.splt;
5109 gotplt = htab->elf.sgotplt;
5110 relplt = htab->elf.srelplt;
5111 }
5112 else
5113 {
5114 plt = htab->elf.iplt;
5115 gotplt = htab->elf.igotplt;
5116 relplt = htab->elf.irelplt;
5117 }
5118
5119 /* This symbol has an entry in the procedure linkage table. Set
5120 it up. */
5121 if ((h->dynindx == -1
5122 && !((h->forced_local || info->executable)
5123 && h->def_regular
5124 && h->type == STT_GNU_IFUNC))
5125 || plt == NULL
5126 || gotplt == NULL
5127 || relplt == NULL)
5128 abort ();
5129
5130 /* Get the index in the procedure linkage table which
5131 corresponds to this symbol. This is the index of this symbol
5132 in all the symbols for which we are making plt entries. The
5133 first entry in the procedure linkage table is reserved.
5134
5135 Get the offset into the .got table of the entry that
5136 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5137 bytes. The first three are reserved for the dynamic linker.
5138
5139 For static executables, we don't reserve anything. */
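      /* A quick example with hypothetical values: with a 16-byte PLT
	 entry and h->plt.offset of 32, the index below is 1 and
	 got_offset becomes (1 + 3) * GOT_ENTRY_SIZE, i.e. the slot just
	 past the three reserved .got.plt entries; in the .iplt case the
	 slot is simply index * GOT_ENTRY_SIZE.  */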
5140
5141 if (plt == htab->elf.splt)
5142 {
5143 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5144 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5145 }
5146 else
5147 {
5148 got_offset = h->plt.offset / abed->plt_entry_size;
5149 got_offset = got_offset * GOT_ENTRY_SIZE;
5150 }
5151
5152 plt_plt_insn_end = abed->plt_plt_insn_end;
5153 plt_plt_offset = abed->plt_plt_offset;
5154 plt_got_insn_size = abed->plt_got_insn_size;
5155 plt_got_offset = abed->plt_got_offset;
5156 if (use_plt_bnd)
5157 {
5158 /* Use the second PLT with BND relocations. */
5159 const bfd_byte *plt_entry, *plt2_entry;
5160
5161 if (eh->has_bnd_reloc)
5162 {
5163 plt_entry = elf_x86_64_bnd_plt_entry;
5164 plt2_entry = elf_x86_64_bnd_plt2_entry;
5165 }
5166 else
5167 {
5168 plt_entry = elf_x86_64_legacy_plt_entry;
5169 plt2_entry = elf_x86_64_legacy_plt2_entry;
5170
5171 /* Subtract 1 since there is no BND prefix. */
5172 plt_plt_insn_end -= 1;
5173 plt_plt_offset -= 1;
5174 plt_got_insn_size -= 1;
5175 plt_got_offset -= 1;
5176 }
5177
5178 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5179 == sizeof (elf_x86_64_legacy_plt_entry));
5180
5181 /* Fill in the entry in the procedure linkage table. */
5182 memcpy (plt->contents + h->plt.offset,
5183 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5184 /* Fill in the entry in the second PLT. */
5185 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5186 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5187
5188 resolved_plt = htab->plt_bnd;
5189 plt_offset = eh->plt_bnd.offset;
5190 }
5191 else
5192 {
5193 /* Fill in the entry in the procedure linkage table. */
5194 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5195 abed->plt_entry_size);
5196
5197 resolved_plt = plt;
5198 plt_offset = h->plt.offset;
5199 }
5200
5201       /* Fill in the relocated fields of the PLT entry.  */
5202
5203       /* Put in the PC-relative offset to the GOT entry; it is relative
5204 	 to the end of the referring instruction, hence its size is subtracted.  */
5205 plt_got_pcrel_offset = (gotplt->output_section->vma
5206 + gotplt->output_offset
5207 + got_offset
5208 - resolved_plt->output_section->vma
5209 - resolved_plt->output_offset
5210 - plt_offset
5211 - plt_got_insn_size);
5212
5213 /* Check PC-relative offset overflow in PLT entry. */
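      /* The displacement field is a signed 32-bit value; adding
	 0x80000000 maps the valid range [-0x80000000, 0x7fffffff] onto
	 [0, 0xffffffff] in the wider bfd_vma arithmetic, so anything
	 larger is out of range.  */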
5214 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5215 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5216 output_bfd, h->root.root.string);
5217
5218 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5219 resolved_plt->contents + plt_offset + plt_got_offset);
5220
5221 /* Fill in the entry in the global offset table, initially this
5222 points to the second part of the PLT entry. */
5223 bfd_put_64 (output_bfd, (plt->output_section->vma
5224 + plt->output_offset
5225 + h->plt.offset + abed->plt_lazy_offset),
5226 gotplt->contents + got_offset);
5227
5228 /* Fill in the entry in the .rela.plt section. */
5229 rela.r_offset = (gotplt->output_section->vma
5230 + gotplt->output_offset
5231 + got_offset);
5232 if (h->dynindx == -1
5233 || ((info->executable
5234 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5235 && h->def_regular
5236 && h->type == STT_GNU_IFUNC))
5237 {
5238 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5239 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5240 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5241 rela.r_addend = (h->root.u.def.value
5242 + h->root.u.def.section->output_section->vma
5243 + h->root.u.def.section->output_offset);
5244 /* R_X86_64_IRELATIVE comes last. */
5245 plt_index = htab->next_irelative_index--;
5246 }
5247 else
5248 {
5249 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5250 rela.r_addend = 0;
5251 plt_index = htab->next_jump_slot_index++;
5252 }
5253
5254 /* Don't fill PLT entry for static executables. */
5255 if (plt == htab->elf.splt)
5256 {
5257 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5258
5259 	  /* Put in the relocation index.  */
5260 bfd_put_32 (output_bfd, plt_index,
5261 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5262
5263 /* Put offset for jmp .PLT0 and check for overflow. We don't
5264 check relocation index for overflow since branch displacement
5265 will overflow first. */
5266 if (plt0_offset > 0x80000000)
5267 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5268 output_bfd, h->root.root.string);
5269 bfd_put_32 (output_bfd, - plt0_offset,
5270 plt->contents + h->plt.offset + plt_plt_offset);
5271 }
5272
5273 bed = get_elf_backend_data (output_bfd);
5274 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5275 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5276 }
5277 else if (eh->plt_got.offset != (bfd_vma) -1)
5278 {
5279 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5280 asection *plt, *got;
5281 bfd_boolean got_after_plt;
5282 int32_t got_pcrel_offset;
5283 const bfd_byte *got_plt_entry;
5284
5285 /* Set the entry in the GOT procedure linkage table. */
5286 plt = htab->plt_got;
5287 got = htab->elf.sgot;
5288 got_offset = h->got.offset;
5289
5290 if (got_offset == (bfd_vma) -1
5291 || h->type == STT_GNU_IFUNC
5292 || plt == NULL
5293 || got == NULL)
5294 abort ();
5295
5296 /* Use the second PLT entry template for the GOT PLT since they
5297 	 are identical.  */
5298 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5299 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5300 if (eh->has_bnd_reloc)
5301 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5302 else
5303 {
5304 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5305
5306 /* Subtract 1 since there is no BND prefix. */
5307 plt_got_insn_size -= 1;
5308 plt_got_offset -= 1;
5309 }
5310
5311 /* Fill in the entry in the GOT procedure linkage table. */
5312 plt_offset = eh->plt_got.offset;
5313 memcpy (plt->contents + plt_offset,
5314 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5315
5316       /* Put in the PC-relative offset to the GOT entry; it is relative
5317 	 to the end of the referring instruction, hence its size is subtracted.  */
5318 got_pcrel_offset = (got->output_section->vma
5319 + got->output_offset
5320 + got_offset
5321 - plt->output_section->vma
5322 - plt->output_offset
5323 - plt_offset
5324 - plt_got_insn_size);
5325
5326 /* Check PC-relative offset overflow in GOT PLT entry. */
5327 got_after_plt = got->output_section->vma > plt->output_section->vma;
5328 if ((got_after_plt && got_pcrel_offset < 0)
5329 || (!got_after_plt && got_pcrel_offset > 0))
5330 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5331 output_bfd, h->root.root.string);
5332
5333 bfd_put_32 (output_bfd, got_pcrel_offset,
5334 plt->contents + plt_offset + plt_got_offset);
5335 }
5336
5337 if (!h->def_regular
5338 && (h->plt.offset != (bfd_vma) -1
5339 || eh->plt_got.offset != (bfd_vma) -1))
5340 {
5341 /* Mark the symbol as undefined, rather than as defined in
5342 the .plt section. Leave the value if there were any
5343 relocations where pointer equality matters (this is a clue
5344 for the dynamic linker, to make function pointer
5345 comparisons work between an application and shared
5346 library), otherwise set it to zero. If a function is only
5347 called from a binary, there is no need to slow down
5348 shared libraries because of that. */
5349 sym->st_shndx = SHN_UNDEF;
5350 if (!h->pointer_equality_needed)
5351 sym->st_value = 0;
5352 }
5353
5354 if (h->got.offset != (bfd_vma) -1
5355 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5356 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5357 {
5358 Elf_Internal_Rela rela;
5359
5360 /* This symbol has an entry in the global offset table. Set it
5361 up. */
5362 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5363 abort ();
5364
5365 rela.r_offset = (htab->elf.sgot->output_section->vma
5366 + htab->elf.sgot->output_offset
5367 + (h->got.offset &~ (bfd_vma) 1));
5368
5369 /* If this is a static link, or it is a -Bsymbolic link and the
5370 symbol is defined locally or was forced to be local because
5371 of a version file, we just want to emit a RELATIVE reloc.
5372 The entry in the global offset table will already have been
5373 initialized in the relocate_section function. */
5374 if (h->def_regular
5375 && h->type == STT_GNU_IFUNC)
5376 {
5377 if (info->shared)
5378 {
5379 /* Generate R_X86_64_GLOB_DAT. */
5380 goto do_glob_dat;
5381 }
5382 else
5383 {
5384 asection *plt;
5385
5386 if (!h->pointer_equality_needed)
5387 abort ();
5388
5389 	      /* For a non-shared object, we can't use .got.plt, which
5390 		 contains the real function address, if we need pointer
5391 		 equality.  We load the GOT entry with the PLT entry instead.  */
5392 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5393 bfd_put_64 (output_bfd, (plt->output_section->vma
5394 + plt->output_offset
5395 + h->plt.offset),
5396 htab->elf.sgot->contents + h->got.offset);
5397 return TRUE;
5398 }
5399 }
5400 else if (info->shared
5401 && SYMBOL_REFERENCES_LOCAL (info, h))
5402 {
5403 if (!h->def_regular)
5404 return FALSE;
5405 BFD_ASSERT((h->got.offset & 1) != 0);
5406 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5407 rela.r_addend = (h->root.u.def.value
5408 + h->root.u.def.section->output_section->vma
5409 + h->root.u.def.section->output_offset);
5410 }
5411 else
5412 {
5413 BFD_ASSERT((h->got.offset & 1) == 0);
5414 do_glob_dat:
5415 bfd_put_64 (output_bfd, (bfd_vma) 0,
5416 htab->elf.sgot->contents + h->got.offset);
5417 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5418 rela.r_addend = 0;
5419 }
5420
5421 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5422 }
5423
5424 if (h->needs_copy)
5425 {
5426 Elf_Internal_Rela rela;
5427
5428 /* This symbol needs a copy reloc. Set it up. */
5429
5430 if (h->dynindx == -1
5431 || (h->root.type != bfd_link_hash_defined
5432 && h->root.type != bfd_link_hash_defweak)
5433 || htab->srelbss == NULL)
5434 abort ();
5435
5436 rela.r_offset = (h->root.u.def.value
5437 + h->root.u.def.section->output_section->vma
5438 + h->root.u.def.section->output_offset);
5439 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5440 rela.r_addend = 0;
5441 elf_append_rela (output_bfd, htab->srelbss, &rela);
5442 }
5443
5444 return TRUE;
5445 }
5446
5447 /* Finish up local dynamic symbol handling. We set the contents of
5448 various dynamic sections here. */
5449
5450 static bfd_boolean
5451 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5452 {
5453 struct elf_link_hash_entry *h
5454 = (struct elf_link_hash_entry *) *slot;
5455 struct bfd_link_info *info
5456 = (struct bfd_link_info *) inf;
5457
5458 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5459 info, h, NULL);
5460 }
5461
5462 /* Used to decide how to sort relocs in an optimal manner for the
5463 dynamic linker, before writing them out. */
5464
5465 static enum elf_reloc_type_class
5466 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5467 const asection *rel_sec ATTRIBUTE_UNUSED,
5468 const Elf_Internal_Rela *rela)
5469 {
5470 switch ((int) ELF32_R_TYPE (rela->r_info))
5471 {
5472 case R_X86_64_RELATIVE:
5473 case R_X86_64_RELATIVE64:
5474 return reloc_class_relative;
5475 case R_X86_64_JUMP_SLOT:
5476 return reloc_class_plt;
5477 case R_X86_64_COPY:
5478 return reloc_class_copy;
5479 default:
5480 return reloc_class_normal;
5481 }
5482 }
5483
5484 /* Finish up the dynamic sections. */
5485
5486 static bfd_boolean
5487 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5488 struct bfd_link_info *info)
5489 {
5490 struct elf_x86_64_link_hash_table *htab;
5491 bfd *dynobj;
5492 asection *sdyn;
5493 const struct elf_x86_64_backend_data *abed;
5494
5495 htab = elf_x86_64_hash_table (info);
5496 if (htab == NULL)
5497 return FALSE;
5498
5499 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5500      section only if there is a .plt section.  */
5501 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5502 ? &elf_x86_64_bnd_arch_bed
5503 : get_elf_x86_64_backend_data (output_bfd));
5504
5505 dynobj = htab->elf.dynobj;
5506 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5507
5508 if (htab->elf.dynamic_sections_created)
5509 {
5510 bfd_byte *dyncon, *dynconend;
5511 const struct elf_backend_data *bed;
5512 bfd_size_type sizeof_dyn;
5513
5514 if (sdyn == NULL || htab->elf.sgot == NULL)
5515 abort ();
5516
5517 bed = get_elf_backend_data (dynobj);
5518 sizeof_dyn = bed->s->sizeof_dyn;
5519 dyncon = sdyn->contents;
5520 dynconend = sdyn->contents + sdyn->size;
5521 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5522 {
5523 Elf_Internal_Dyn dyn;
5524 asection *s;
5525
5526 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5527
5528 switch (dyn.d_tag)
5529 {
5530 default:
5531 continue;
5532
5533 case DT_PLTGOT:
5534 s = htab->elf.sgotplt;
5535 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5536 break;
5537
5538 case DT_JMPREL:
5539 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5540 break;
5541
5542 case DT_PLTRELSZ:
5543 s = htab->elf.srelplt->output_section;
5544 dyn.d_un.d_val = s->size;
5545 break;
5546
5547 case DT_RELASZ:
5548 /* The procedure linkage table relocs (DT_JMPREL) should
5549 not be included in the overall relocs (DT_RELA).
5550 Therefore, we override the DT_RELASZ entry here to
5551 make it not include the JMPREL relocs. Since the
5552 linker script arranges for .rela.plt to follow all
5553 other relocation sections, we don't have to worry
5554 about changing the DT_RELA entry. */
5555 if (htab->elf.srelplt != NULL)
5556 {
5557 s = htab->elf.srelplt->output_section;
5558 dyn.d_un.d_val -= s->size;
5559 }
5560 break;
5561
5562 case DT_TLSDESC_PLT:
5563 s = htab->elf.splt;
5564 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5565 + htab->tlsdesc_plt;
5566 break;
5567
5568 case DT_TLSDESC_GOT:
5569 s = htab->elf.sgot;
5570 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5571 + htab->tlsdesc_got;
5572 break;
5573 }
5574
5575 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5576 }
5577
5578 /* Fill in the special first entry in the procedure linkage table. */
5579 if (htab->elf.splt && htab->elf.splt->size > 0)
5580 {
5581 /* Fill in the first entry in the procedure linkage table. */
5582 memcpy (htab->elf.splt->contents,
5583 abed->plt0_entry, abed->plt_entry_size);
5584       /* Add the offset for pushq GOT+8(%rip); since the instruction
5585 	 uses 6 bytes, subtract this value.  */
5586 bfd_put_32 (output_bfd,
5587 (htab->elf.sgotplt->output_section->vma
5588 + htab->elf.sgotplt->output_offset
5589 + 8
5590 - htab->elf.splt->output_section->vma
5591 - htab->elf.splt->output_offset
5592 - 6),
5593 htab->elf.splt->contents + abed->plt0_got1_offset);
5594 /* Add offset for the PC-relative instruction accessing GOT+16,
5595 subtracting the offset to the end of that instruction. */
5596 bfd_put_32 (output_bfd,
5597 (htab->elf.sgotplt->output_section->vma
5598 + htab->elf.sgotplt->output_offset
5599 + 16
5600 - htab->elf.splt->output_section->vma
5601 - htab->elf.splt->output_offset
5602 - abed->plt0_got2_insn_end),
5603 htab->elf.splt->contents + abed->plt0_got2_offset);
5604
5605 elf_section_data (htab->elf.splt->output_section)
5606 ->this_hdr.sh_entsize = abed->plt_entry_size;
5607
5608 if (htab->tlsdesc_plt)
5609 {
5610 bfd_put_64 (output_bfd, (bfd_vma) 0,
5611 htab->elf.sgot->contents + htab->tlsdesc_got);
5612
5613 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5614 abed->plt0_entry, abed->plt_entry_size);
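	      /* The TLS descriptor trampoline reuses the PLT0 template;
		 the two displacements patched below are simply rebased
		 by htab->tlsdesc_plt, the trampoline's offset within
		 .plt, and the second one targets the .got slot at
		 htab->tlsdesc_got rather than GOT+16.  */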
5615
 5616 	      /* Add the offset for pushq GOT+8(%rip); the instruction
 5617 		 is 6 bytes long, so subtract 6.  */
5618 bfd_put_32 (output_bfd,
5619 (htab->elf.sgotplt->output_section->vma
5620 + htab->elf.sgotplt->output_offset
5621 + 8
5622 - htab->elf.splt->output_section->vma
5623 - htab->elf.splt->output_offset
5624 - htab->tlsdesc_plt
5625 - 6),
5626 htab->elf.splt->contents
5627 + htab->tlsdesc_plt + abed->plt0_got1_offset);
 5628 	      /* Add the offset for the PC-relative instruction accessing
 5629 		 GOT+TDG, where TDG stands for htab->tlsdesc_got,
 5630 		 subtracting the offset to the end of that instruction.  */
5631 bfd_put_32 (output_bfd,
5632 (htab->elf.sgot->output_section->vma
5633 + htab->elf.sgot->output_offset
5634 + htab->tlsdesc_got
5635 - htab->elf.splt->output_section->vma
5636 - htab->elf.splt->output_offset
5637 - htab->tlsdesc_plt
5638 - abed->plt0_got2_insn_end),
5639 htab->elf.splt->contents
5640 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5641 }
5642 }
5643 }
5644
5645 if (htab->plt_bnd != NULL)
5646 elf_section_data (htab->plt_bnd->output_section)
5647 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5648
5649 if (htab->elf.sgotplt)
5650 {
5651 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5652 {
5653 (*_bfd_error_handler)
5654 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5655 return FALSE;
5656 }
5657
5658 /* Fill in the first three entries in the global offset table. */
5659 if (htab->elf.sgotplt->size > 0)
5660 {
5661 /* Set the first entry in the global offset table to the address of
5662 the dynamic section. */
5663 if (sdyn == NULL)
5664 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5665 else
5666 bfd_put_64 (output_bfd,
5667 sdyn->output_section->vma + sdyn->output_offset,
5668 htab->elf.sgotplt->contents);
5669 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5670 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5671 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5672 }
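      /* GOT[0] now holds the address of _DYNAMIC; at run time the dynamic
	 linker fills GOT[1] with its link map pointer and GOT[2] with the
	 address of its lazy resolver, which PLT0 pushes and jumps
	 through.  */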
5673
5674 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5675 GOT_ENTRY_SIZE;
5676 }
5677
5678 /* Adjust .eh_frame for .plt section. */
5679 if (htab->plt_eh_frame != NULL
5680 && htab->plt_eh_frame->contents != NULL)
5681 {
5682 if (htab->elf.splt != NULL
5683 && htab->elf.splt->size != 0
5684 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5685 && htab->elf.splt->output_section != NULL
5686 && htab->plt_eh_frame->output_section != NULL)
5687 {
5688 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5689 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5690 + htab->plt_eh_frame->output_offset
5691 + PLT_FDE_START_OFFSET;
5692 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5693 htab->plt_eh_frame->contents
5694 + PLT_FDE_START_OFFSET);
5695 }
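      /* The value written is pc-relative: the start of .plt minus the
	 address of the FDE's initial-location field, which sits
	 PLT_FDE_START_OFFSET bytes into the .eh_frame template (the FDE
	 encoding used is DW_EH_PE_pcrel | DW_EH_PE_sdata4).  */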
5696 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5697 {
5698 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5699 htab->plt_eh_frame,
5700 htab->plt_eh_frame->contents))
5701 return FALSE;
5702 }
5703 }
5704
5705 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5706 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5707 = GOT_ENTRY_SIZE;
5708
5709 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5710 htab_traverse (htab->loc_hash_table,
5711 elf_x86_64_finish_local_dynamic_symbol,
5712 info);
5713
5714 return TRUE;
5715 }
5716
5717 /* Return an array of PLT entry symbol values. */
5718
5719 static bfd_vma *
5720 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5721 asection *relplt)
5722 {
5723 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5724 arelent *p;
5725 long count, i;
5726 bfd_vma *plt_sym_val;
5727 bfd_vma plt_offset;
5728 bfd_byte *plt_contents;
5729 const struct elf_x86_64_backend_data *bed;
5730 Elf_Internal_Shdr *hdr;
5731 asection *plt_bnd;
5732
 5733   /* Get the .plt section contents.  The PLT passed down may point to
 5734      the .plt.bnd section; make sure that PLT always points to the
 5735      .plt section.  */
5736 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5737 if (plt_bnd)
5738 {
5739 if (plt != plt_bnd)
5740 abort ();
5741 plt = bfd_get_section_by_name (abfd, ".plt");
5742 if (plt == NULL)
5743 abort ();
5744 bed = &elf_x86_64_bnd_arch_bed;
5745 }
5746 else
5747 bed = get_elf_x86_64_backend_data (abfd);
5748
5749 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5750 if (plt_contents == NULL)
5751 return NULL;
5752 if (!bfd_get_section_contents (abfd, (asection *) plt,
5753 plt_contents, 0, plt->size))
5754 {
5755 bad_return:
5756 free (plt_contents);
5757 return NULL;
5758 }
5759
5760 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5761 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5762 goto bad_return;
5763
5764 hdr = &elf_section_data (relplt)->this_hdr;
5765 count = relplt->size / hdr->sh_entsize;
5766
5767 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5768 if (plt_sym_val == NULL)
5769 goto bad_return;
5770
5771 for (i = 0; i < count; i++)
5772 plt_sym_val[i] = -1;
5773
5774 plt_offset = bed->plt_entry_size;
5775 p = relplt->relocation;
5776 for (i = 0; i < count; i++, p++)
5777 {
5778 long reloc_index;
5779
 5780       /* Skip unknown relocations.  */
5781 if (p->howto == NULL)
5782 continue;
5783
5784 if (p->howto->type != R_X86_64_JUMP_SLOT
5785 && p->howto->type != R_X86_64_IRELATIVE)
5786 continue;
5787
5788 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5789 + bed->plt_reloc_offset));
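      /* The 32-bit value read back here is the immediate of the
	 "pushq $index" inside the lazy PLT entry, i.e. an index into
	 .rela.plt rather than a byte offset.  */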
5790 if (reloc_index >= count)
5791 abort ();
5792 if (plt_bnd)
5793 {
 5794 	  /* This is the index into the .plt section.  */
 5795 	  long plt_index = plt_offset / bed->plt_entry_size;
 5796 	  /* Store the address of the corresponding entry in the
 	     .plt.bnd section.  */
5797 plt_sym_val[reloc_index] =
5798 (plt_bnd->vma
5799 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
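	  /* With a .plt.bnd section the address users care about is the
	     stub in .plt.bnd that the corresponding .plt entry branches
	     through, so that stub's address is reported instead of the
	     .plt entry itself (PLT0 has no stub, hence plt_index - 1).  */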
5800 }
5801 else
5802 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5803 plt_offset += bed->plt_entry_size;
5804
5805 /* PR binutils/18437: Skip extra relocations in the .rela.plt
5806 section. */
5807 if (plt_offset >= plt->size)
5808 break;
5809 }
5810
5811 free (plt_contents);
5812
5813 return plt_sym_val;
5814 }
5815
5816 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5817 support. */
5818
5819 static long
5820 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5821 long symcount,
5822 asymbol **syms,
5823 long dynsymcount,
5824 asymbol **dynsyms,
5825 asymbol **ret)
5826 {
5827 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5828 as PLT if it exists. */
5829 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5830 if (plt == NULL)
5831 plt = bfd_get_section_by_name (abfd, ".plt");
5832 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5833 dynsymcount, dynsyms, ret,
5834 plt,
5835 elf_x86_64_get_plt_sym_val);
5836 }
5837
5838 /* Handle an x86-64 specific section when reading an object file. This
5839 is called when elfcode.h finds a section with an unknown type. */
5840
5841 static bfd_boolean
5842 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5843 const char *name, int shindex)
5844 {
5845 if (hdr->sh_type != SHT_X86_64_UNWIND)
5846 return FALSE;
5847
5848 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5849 return FALSE;
5850
5851 return TRUE;
5852 }
5853
5854 /* Hook called by the linker routine which adds symbols from an object
5855 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5856 of .bss. */
5857
5858 static bfd_boolean
5859 elf_x86_64_add_symbol_hook (bfd *abfd,
5860 struct bfd_link_info *info,
5861 Elf_Internal_Sym *sym,
5862 const char **namep ATTRIBUTE_UNUSED,
5863 flagword *flagsp ATTRIBUTE_UNUSED,
5864 asection **secp,
5865 bfd_vma *valp)
5866 {
5867 asection *lcomm;
5868
5869 switch (sym->st_shndx)
5870 {
5871 case SHN_X86_64_LCOMMON:
5872 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5873 if (lcomm == NULL)
5874 {
5875 lcomm = bfd_make_section_with_flags (abfd,
5876 "LARGE_COMMON",
5877 (SEC_ALLOC
5878 | SEC_IS_COMMON
5879 | SEC_LINKER_CREATED));
5880 if (lcomm == NULL)
5881 return FALSE;
5882 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5883 }
5884 *secp = lcomm;
5885 *valp = sym->st_size;
5886 return TRUE;
5887 }
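  /* SHN_X86_64_LCOMMON symbols typically come from the medium/large code
     models (e.g. gcc -mcmodel=medium with objects above
     -mlarge-data-threshold); routing them into a large common section
     keeps ordinary .bss within 32-bit reach.  */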
5888
5889 if (ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE
5890 && (abfd->flags & DYNAMIC) == 0
5891 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5892 elf_tdata (info->output_bfd)->has_gnu_symbols
5893 |= elf_gnu_symbol_unique;
5894
5895 return TRUE;
5896 }
5897
5898
5899 /* Given a BFD section, try to locate the corresponding ELF section
5900 index. */
5901
5902 static bfd_boolean
5903 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5904 asection *sec, int *index_return)
5905 {
5906 if (sec == &_bfd_elf_large_com_section)
5907 {
5908 *index_return = SHN_X86_64_LCOMMON;
5909 return TRUE;
5910 }
5911 return FALSE;
5912 }
5913
5914 /* Process a symbol. */
5915
5916 static void
5917 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5918 asymbol *asym)
5919 {
5920 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5921
5922 switch (elfsym->internal_elf_sym.st_shndx)
5923 {
5924 case SHN_X86_64_LCOMMON:
5925 asym->section = &_bfd_elf_large_com_section;
5926 asym->value = elfsym->internal_elf_sym.st_size;
 5927       /* Common symbols don't get BSF_GLOBAL set.  */
5928 asym->flags &= ~BSF_GLOBAL;
5929 break;
5930 }
5931 }
5932
5933 static bfd_boolean
5934 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5935 {
5936 return (sym->st_shndx == SHN_COMMON
5937 || sym->st_shndx == SHN_X86_64_LCOMMON);
5938 }
5939
5940 static unsigned int
5941 elf_x86_64_common_section_index (asection *sec)
5942 {
5943 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5944 return SHN_COMMON;
5945 else
5946 return SHN_X86_64_LCOMMON;
5947 }
5948
5949 static asection *
5950 elf_x86_64_common_section (asection *sec)
5951 {
5952 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5953 return bfd_com_section_ptr;
5954 else
5955 return &_bfd_elf_large_com_section;
5956 }
5957
5958 static bfd_boolean
5959 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5960 const Elf_Internal_Sym *sym,
5961 asection **psec,
5962 bfd_boolean newdef,
5963 bfd_boolean olddef,
5964 bfd *oldbfd,
5965 const asection *oldsec)
5966 {
5967 /* A normal common symbol and a large common symbol result in a
5968 normal common symbol. We turn the large common symbol into a
5969 normal one. */
5970 if (!olddef
5971 && h->root.type == bfd_link_hash_common
5972 && !newdef
5973 && bfd_is_com_section (*psec)
5974 && oldsec != *psec)
5975 {
5976 if (sym->st_shndx == SHN_COMMON
5977 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5978 {
5979 h->root.u.c.p->section
5980 = bfd_make_section_old_way (oldbfd, "COMMON");
5981 h->root.u.c.p->section->flags = SEC_ALLOC;
5982 }
5983 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5984 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5985 *psec = bfd_com_section_ptr;
5986 }
5987
5988 return TRUE;
5989 }
5990
5991 static int
5992 elf_x86_64_additional_program_headers (bfd *abfd,
5993 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5994 {
5995 asection *s;
5996 int count = 0;
5997
 5998   /* Check to see if we need a large read-only segment.  */
5999 s = bfd_get_section_by_name (abfd, ".lrodata");
6000 if (s && (s->flags & SEC_LOAD))
6001 count++;
6002
 6003   /* Check to see if we need a large data segment.  Since the .lbss
 6004      section is placed right after the .bss section, there should be no
 6005      need for a large data segment just because of .lbss.  */
6006 s = bfd_get_section_by_name (abfd, ".ldata");
6007 if (s && (s->flags & SEC_LOAD))
6008 count++;
6009
6010 return count;
6011 }
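/* The count above only reserves room in the program header table; the
   actual PT_LOAD segments covering .lrodata/.ldata (and .lbss) are laid
   out by the generic segment-mapping code.  */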
6012
6013 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6014
6015 static bfd_boolean
6016 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6017 {
6018 if (h->plt.offset != (bfd_vma) -1
6019 && !h->def_regular
6020 && !h->pointer_equality_needed)
6021 return FALSE;
6022
6023 return _bfd_elf_hash_symbol (h);
6024 }
6025
6026 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6027
6028 static bfd_boolean
6029 elf_x86_64_relocs_compatible (const bfd_target *input,
6030 const bfd_target *output)
6031 {
6032 return ((xvec_get_elf_backend_data (input)->s->elfclass
6033 == xvec_get_elf_backend_data (output)->s->elfclass)
6034 && _bfd_elf_relocs_compatible (input, output));
6035 }
6036
6037 static const struct bfd_elf_special_section
6038 elf_x86_64_special_sections[]=
6039 {
6040 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6041 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6042 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6043 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6044 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6045 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6046 { NULL, 0, 0, 0, 0 }
6047 };
6048
6049 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6050 #define TARGET_LITTLE_NAME "elf64-x86-64"
6051 #define ELF_ARCH bfd_arch_i386
6052 #define ELF_TARGET_ID X86_64_ELF_DATA
6053 #define ELF_MACHINE_CODE EM_X86_64
6054 #define ELF_MAXPAGESIZE 0x200000
6055 #define ELF_MINPAGESIZE 0x1000
6056 #define ELF_COMMONPAGESIZE 0x1000
6057
6058 #define elf_backend_can_gc_sections 1
6059 #define elf_backend_can_refcount 1
6060 #define elf_backend_want_got_plt 1
6061 #define elf_backend_plt_readonly 1
6062 #define elf_backend_want_plt_sym 0
6063 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6064 #define elf_backend_rela_normal 1
6065 #define elf_backend_plt_alignment 4
6066 #define elf_backend_extern_protected_data 1
6067
6068 #define elf_info_to_howto elf_x86_64_info_to_howto
6069
6070 #define bfd_elf64_bfd_link_hash_table_create \
6071 elf_x86_64_link_hash_table_create
6072 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6073 #define bfd_elf64_bfd_reloc_name_lookup \
6074 elf_x86_64_reloc_name_lookup
6075
6076 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6077 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6078 #define elf_backend_check_relocs elf_x86_64_check_relocs
6079 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6080 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6081 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6082 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6083 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6084 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
6085 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6086 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6087 #ifdef CORE_HEADER
6088 #define elf_backend_write_core_note elf_x86_64_write_core_note
6089 #endif
6090 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6091 #define elf_backend_relocate_section elf_x86_64_relocate_section
6092 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6093 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6094 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6095 #define elf_backend_object_p elf64_x86_64_elf_object_p
6096 #define bfd_elf64_mkobject elf_x86_64_mkobject
6097 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6098
6099 #define elf_backend_section_from_shdr \
6100 elf_x86_64_section_from_shdr
6101
6102 #define elf_backend_section_from_bfd_section \
6103 elf_x86_64_elf_section_from_bfd_section
6104 #define elf_backend_add_symbol_hook \
6105 elf_x86_64_add_symbol_hook
6106 #define elf_backend_symbol_processing \
6107 elf_x86_64_symbol_processing
6108 #define elf_backend_common_section_index \
6109 elf_x86_64_common_section_index
6110 #define elf_backend_common_section \
6111 elf_x86_64_common_section
6112 #define elf_backend_common_definition \
6113 elf_x86_64_common_definition
6114 #define elf_backend_merge_symbol \
6115 elf_x86_64_merge_symbol
6116 #define elf_backend_special_sections \
6117 elf_x86_64_special_sections
6118 #define elf_backend_additional_program_headers \
6119 elf_x86_64_additional_program_headers
6120 #define elf_backend_hash_symbol \
6121 elf_x86_64_hash_symbol
6122
6123 #include "elf64-target.h"
6124
6125 /* CloudABI support. */
6126
6127 #undef TARGET_LITTLE_SYM
6128 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6129 #undef TARGET_LITTLE_NAME
6130 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6131
6132 #undef ELF_OSABI
6133 #define ELF_OSABI ELFOSABI_CLOUDABI
6134
6135 #undef elf64_bed
6136 #define elf64_bed elf64_x86_64_cloudabi_bed
6137
6138 #include "elf64-target.h"
6139
6140 /* FreeBSD support. */
6141
6142 #undef TARGET_LITTLE_SYM
6143 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6144 #undef TARGET_LITTLE_NAME
6145 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6146
6147 #undef ELF_OSABI
6148 #define ELF_OSABI ELFOSABI_FREEBSD
6149
6150 #undef elf64_bed
6151 #define elf64_bed elf64_x86_64_fbsd_bed
6152
6153 #include "elf64-target.h"
6154
6155 /* Solaris 2 support. */
6156
6157 #undef TARGET_LITTLE_SYM
6158 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6159 #undef TARGET_LITTLE_NAME
6160 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6161
 6162 /* Restore the default: we cannot use ELFOSABI_SOLARIS, since
 6163    ELFOSABI_NONE objects would then not be recognized.  */
6164 #undef ELF_OSABI
6165
6166 #undef elf64_bed
6167 #define elf64_bed elf64_x86_64_sol2_bed
6168
6169 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6170 boundary. */
6171 #undef elf_backend_static_tls_alignment
6172 #define elf_backend_static_tls_alignment 16
6173
6174 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6175
6176 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6177 File, p.63. */
6178 #undef elf_backend_want_plt_sym
6179 #define elf_backend_want_plt_sym 1
6180
6181 #include "elf64-target.h"
6182
6183 /* Native Client support. */
6184
6185 static bfd_boolean
6186 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6187 {
6188 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6189 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6190 return TRUE;
6191 }
6192
6193 #undef TARGET_LITTLE_SYM
6194 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6195 #undef TARGET_LITTLE_NAME
6196 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6197 #undef elf64_bed
6198 #define elf64_bed elf64_x86_64_nacl_bed
6199
6200 #undef ELF_MAXPAGESIZE
6201 #undef ELF_MINPAGESIZE
6202 #undef ELF_COMMONPAGESIZE
6203 #define ELF_MAXPAGESIZE 0x10000
6204 #define ELF_MINPAGESIZE 0x10000
6205 #define ELF_COMMONPAGESIZE 0x10000
6206
6207 /* Restore defaults. */
6208 #undef ELF_OSABI
6209 #undef elf_backend_static_tls_alignment
6210 #undef elf_backend_want_plt_sym
6211 #define elf_backend_want_plt_sym 0
6212
6213 /* NaCl uses substantially different PLT entries for the same effects. */
6214
6215 #undef elf_backend_plt_alignment
6216 #define elf_backend_plt_alignment 5
6217 #define NACL_PLT_ENTRY_SIZE 64
6218 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
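/* The 8-bit immediate 0xe0 sign-extends to -32, so "and $-32, %r11d"
   clears the low five bits (forcing 32-byte bundle alignment) and, by
   writing a 32-bit register, also zeroes the upper half of %r11 before
   the indirect jump.  */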
6219
6220 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6221 {
6222 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6223 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6224 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6225 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6226 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6227
6228 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6229 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6230
6231 /* 32 bytes of nop to pad out to the standard size. */
6232 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6233 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6234 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6235 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6236 0x66, /* excess data32 prefix */
6237 0x90 /* nop */
6238 };
6239
6240 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6241 {
6242 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6243 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6244 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6245 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6246
6247 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6248 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6249 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6250
6251 /* Lazy GOT entries point here (32-byte aligned). */
6252 0x68, /* pushq immediate */
6253 0, 0, 0, 0, /* replaced with index into relocation table. */
6254 0xe9, /* jmp relative */
6255 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6256
6257 /* 22 bytes of nop to pad out to the standard size. */
6258 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6259 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6260 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6261 };
6262
6263 /* .eh_frame covering the .plt section. */
6264
6265 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6266 {
6267 #if (PLT_CIE_LENGTH != 20 \
6268 || PLT_FDE_LENGTH != 36 \
6269 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6270 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6271 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6272 #endif
6273 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6274 0, 0, 0, 0, /* CIE ID */
6275 1, /* CIE version */
6276 'z', 'R', 0, /* Augmentation string */
6277 1, /* Code alignment factor */
6278 0x78, /* Data alignment factor */
6279 16, /* Return address column */
6280 1, /* Augmentation size */
6281 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6282 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6283 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6284 DW_CFA_nop, DW_CFA_nop,
6285
6286 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6287 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6288 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6289 0, 0, 0, 0, /* .plt size goes here */
6290 0, /* Augmentation size */
6291 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6292 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6293 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6294 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6295 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6296 13, /* Block length */
6297 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6298 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6299 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6300 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6301 DW_CFA_nop, DW_CFA_nop
6302 };
6303
6304 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6305 {
6306 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6307 elf_x86_64_nacl_plt_entry, /* plt_entry */
6308 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6309 2, /* plt0_got1_offset */
6310 9, /* plt0_got2_offset */
6311 13, /* plt0_got2_insn_end */
6312 3, /* plt_got_offset */
6313 33, /* plt_reloc_offset */
6314 38, /* plt_plt_offset */
6315 7, /* plt_got_insn_size */
6316 42, /* plt_plt_insn_end */
6317 32, /* plt_lazy_offset */
6318 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6319 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6320 };
6321
6322 #undef elf_backend_arch_data
6323 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6324
6325 #undef elf_backend_object_p
6326 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6327 #undef elf_backend_modify_segment_map
6328 #define elf_backend_modify_segment_map nacl_modify_segment_map
6329 #undef elf_backend_modify_program_headers
6330 #define elf_backend_modify_program_headers nacl_modify_program_headers
6331 #undef elf_backend_final_write_processing
6332 #define elf_backend_final_write_processing nacl_final_write_processing
6333
6334 #include "elf64-target.h"
6335
6336 /* Native Client x32 support. */
6337
6338 static bfd_boolean
6339 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6340 {
6341 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6342 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6343 return TRUE;
6344 }
6345
6346 #undef TARGET_LITTLE_SYM
6347 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6348 #undef TARGET_LITTLE_NAME
6349 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6350 #undef elf32_bed
6351 #define elf32_bed elf32_x86_64_nacl_bed
6352
6353 #define bfd_elf32_bfd_link_hash_table_create \
6354 elf_x86_64_link_hash_table_create
6355 #define bfd_elf32_bfd_reloc_type_lookup \
6356 elf_x86_64_reloc_type_lookup
6357 #define bfd_elf32_bfd_reloc_name_lookup \
6358 elf_x86_64_reloc_name_lookup
6359 #define bfd_elf32_mkobject \
6360 elf_x86_64_mkobject
6361 #define bfd_elf32_get_synthetic_symtab \
6362 elf_x86_64_get_synthetic_symtab
6363
6364 #undef elf_backend_object_p
6365 #define elf_backend_object_p \
6366 elf32_x86_64_nacl_elf_object_p
6367
6368 #undef elf_backend_bfd_from_remote_memory
6369 #define elf_backend_bfd_from_remote_memory \
6370 _bfd_elf32_bfd_from_remote_memory
6371
6372 #undef elf_backend_size_info
6373 #define elf_backend_size_info \
6374 _bfd_elf32_size_info
6375
6376 #include "elf32-target.h"
6377
6378 /* Restore defaults. */
6379 #undef elf_backend_object_p
6380 #define elf_backend_object_p elf64_x86_64_elf_object_p
6381 #undef elf_backend_bfd_from_remote_memory
6382 #undef elf_backend_size_info
6383 #undef elf_backend_modify_segment_map
6384 #undef elf_backend_modify_program_headers
6385 #undef elf_backend_final_write_processing
6386
6387 /* Intel L1OM support. */
6388
6389 static bfd_boolean
6390 elf64_l1om_elf_object_p (bfd *abfd)
6391 {
6392 /* Set the right machine number for an L1OM elf64 file. */
6393 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6394 return TRUE;
6395 }
6396
6397 #undef TARGET_LITTLE_SYM
6398 #define TARGET_LITTLE_SYM l1om_elf64_vec
6399 #undef TARGET_LITTLE_NAME
6400 #define TARGET_LITTLE_NAME "elf64-l1om"
6401 #undef ELF_ARCH
6402 #define ELF_ARCH bfd_arch_l1om
6403
6404 #undef ELF_MACHINE_CODE
6405 #define ELF_MACHINE_CODE EM_L1OM
6406
6407 #undef ELF_OSABI
6408
6409 #undef elf64_bed
6410 #define elf64_bed elf64_l1om_bed
6411
6412 #undef elf_backend_object_p
6413 #define elf_backend_object_p elf64_l1om_elf_object_p
6414
6415 /* Restore defaults. */
6416 #undef ELF_MAXPAGESIZE
6417 #undef ELF_MINPAGESIZE
6418 #undef ELF_COMMONPAGESIZE
6419 #define ELF_MAXPAGESIZE 0x200000
6420 #define ELF_MINPAGESIZE 0x1000
6421 #define ELF_COMMONPAGESIZE 0x1000
6422 #undef elf_backend_plt_alignment
6423 #define elf_backend_plt_alignment 4
6424 #undef elf_backend_arch_data
6425 #define elf_backend_arch_data &elf_x86_64_arch_bed
6426
6427 #include "elf64-target.h"
6428
6429 /* FreeBSD L1OM support. */
6430
6431 #undef TARGET_LITTLE_SYM
6432 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6433 #undef TARGET_LITTLE_NAME
6434 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6435
6436 #undef ELF_OSABI
6437 #define ELF_OSABI ELFOSABI_FREEBSD
6438
6439 #undef elf64_bed
6440 #define elf64_bed elf64_l1om_fbsd_bed
6441
6442 #include "elf64-target.h"
6443
6444 /* Intel K1OM support. */
6445
6446 static bfd_boolean
6447 elf64_k1om_elf_object_p (bfd *abfd)
6448 {
 6449   /* Set the right machine number for a K1OM elf64 file.  */
6450 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6451 return TRUE;
6452 }
6453
6454 #undef TARGET_LITTLE_SYM
6455 #define TARGET_LITTLE_SYM k1om_elf64_vec
6456 #undef TARGET_LITTLE_NAME
6457 #define TARGET_LITTLE_NAME "elf64-k1om"
6458 #undef ELF_ARCH
6459 #define ELF_ARCH bfd_arch_k1om
6460
6461 #undef ELF_MACHINE_CODE
6462 #define ELF_MACHINE_CODE EM_K1OM
6463
6464 #undef ELF_OSABI
6465
6466 #undef elf64_bed
6467 #define elf64_bed elf64_k1om_bed
6468
6469 #undef elf_backend_object_p
6470 #define elf_backend_object_p elf64_k1om_elf_object_p
6471
6472 #undef elf_backend_static_tls_alignment
6473
6474 #undef elf_backend_want_plt_sym
6475 #define elf_backend_want_plt_sym 0
6476
6477 #include "elf64-target.h"
6478
6479 /* FreeBSD K1OM support. */
6480
6481 #undef TARGET_LITTLE_SYM
6482 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6483 #undef TARGET_LITTLE_NAME
6484 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6485
6486 #undef ELF_OSABI
6487 #define ELF_OSABI ELFOSABI_FREEBSD
6488
6489 #undef elf64_bed
6490 #define elf64_bed elf64_k1om_fbsd_bed
6491
6492 #include "elf64-target.h"
6493
6494 /* 32bit x86-64 support. */
6495
6496 #undef TARGET_LITTLE_SYM
6497 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6498 #undef TARGET_LITTLE_NAME
6499 #define TARGET_LITTLE_NAME "elf32-x86-64"
6500 #undef elf32_bed
6501
6502 #undef ELF_ARCH
6503 #define ELF_ARCH bfd_arch_i386
6504
6505 #undef ELF_MACHINE_CODE
6506 #define ELF_MACHINE_CODE EM_X86_64
6507
6508 #undef ELF_OSABI
6509
6510 #undef elf_backend_object_p
6511 #define elf_backend_object_p \
6512 elf32_x86_64_elf_object_p
6513
6514 #undef elf_backend_bfd_from_remote_memory
6515 #define elf_backend_bfd_from_remote_memory \
6516 _bfd_elf32_bfd_from_remote_memory
6517
6518 #undef elf_backend_size_info
6519 #define elf_backend_size_info \
6520 _bfd_elf32_size_info
6521
6522 #include "elf32-target.h"