Fix missing "Core was generated by" when loading a x32 corefile.
[binutils-gdb.git] / bfd / elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2023 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25 #include "sframe.h"
26
27 #include "opcode/i386.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
35 #define MINUS_ONE (~ (bfd_vma) 0)
36
37 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
38 an identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to
39 get the relocation type. We also use ELF_ST_TYPE instead of
40 ELF64_ST_TYPE since they are the same. */
41
42 /* The relocation "howto" table. Order of fields:
43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
45 static reloc_howto_type x86_64_elf_howto_table[] =
46 {
47 HOWTO(R_X86_64_NONE, 0, 0, 0, false, 0, complain_overflow_dont,
48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000,
49 false),
50 HOWTO(R_X86_64_64, 0, 8, 64, false, 0, complain_overflow_dont,
51 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
52 false),
53 HOWTO(R_X86_64_PC32, 0, 4, 32, true, 0, complain_overflow_signed,
54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
55 true),
56 HOWTO(R_X86_64_GOT32, 0, 4, 32, false, 0, complain_overflow_signed,
57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
58 false),
59 HOWTO(R_X86_64_PLT32, 0, 4, 32, true, 0, complain_overflow_signed,
60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
61 true),
62 HOWTO(R_X86_64_COPY, 0, 4, 32, false, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
64 false),
65 HOWTO(R_X86_64_GLOB_DAT, 0, 8, 64, false, 0, complain_overflow_dont,
66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
67 false),
68 HOWTO(R_X86_64_JUMP_SLOT, 0, 8, 64, false, 0, complain_overflow_dont,
69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
70 false),
71 HOWTO(R_X86_64_RELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
73 false),
74 HOWTO(R_X86_64_GOTPCREL, 0, 4, 32, true, 0, complain_overflow_signed,
75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
76 true),
77 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_unsigned,
78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
79 false),
80 HOWTO(R_X86_64_32S, 0, 4, 32, false, 0, complain_overflow_signed,
81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
82 false),
83 HOWTO(R_X86_64_16, 0, 2, 16, false, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
85 HOWTO(R_X86_64_PC16, 0, 2, 16, true, 0, complain_overflow_bitfield,
86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
87 HOWTO(R_X86_64_8, 0, 1, 8, false, 0, complain_overflow_bitfield,
88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
89 HOWTO(R_X86_64_PC8, 0, 1, 8, true, 0, complain_overflow_signed,
90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
91 HOWTO(R_X86_64_DTPMOD64, 0, 8, 64, false, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
93 false),
94 HOWTO(R_X86_64_DTPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
96 false),
97 HOWTO(R_X86_64_TPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
99 false),
100 HOWTO(R_X86_64_TLSGD, 0, 4, 32, true, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
102 true),
103 HOWTO(R_X86_64_TLSLD, 0, 4, 32, true, 0, complain_overflow_signed,
104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
105 true),
106 HOWTO(R_X86_64_DTPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
108 false),
109 HOWTO(R_X86_64_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed,
110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
111 true),
112 HOWTO(R_X86_64_TPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
114 false),
115 HOWTO(R_X86_64_PC64, 0, 8, 64, true, 0, complain_overflow_dont,
116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
117 true),
118 HOWTO(R_X86_64_GOTOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
120 false),
121 HOWTO(R_X86_64_GOTPC32, 0, 4, 32, true, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
123 true),
124 HOWTO(R_X86_64_GOT64, 0, 8, 64, false, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
126 false),
127 HOWTO(R_X86_64_GOTPCREL64, 0, 8, 64, true, 0, complain_overflow_signed,
128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
129 true),
130 HOWTO(R_X86_64_GOTPC64, 0, 8, 64, true, 0, complain_overflow_signed,
131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
132 true),
133 HOWTO(R_X86_64_GOTPLT64, 0, 8, 64, false, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
135 false),
136 HOWTO(R_X86_64_PLTOFF64, 0, 8, 64, false, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
138 false),
139 HOWTO(R_X86_64_SIZE32, 0, 4, 32, false, 0, complain_overflow_unsigned,
140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
141 false),
142 HOWTO(R_X86_64_SIZE64, 0, 8, 64, false, 0, complain_overflow_dont,
143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
144 false),
145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
146 complain_overflow_bitfield, bfd_elf_generic_reloc,
147 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
148 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, false, 0,
149 complain_overflow_dont, bfd_elf_generic_reloc,
150 "R_X86_64_TLSDESC_CALL",
151 false, 0, 0, false),
152 HOWTO(R_X86_64_TLSDESC, 0, 8, 64, false, 0,
153 complain_overflow_dont, bfd_elf_generic_reloc,
154 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
155 HOWTO(R_X86_64_IRELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
156 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
157 false),
158 HOWTO(R_X86_64_RELATIVE64, 0, 8, 64, false, 0, complain_overflow_dont,
159 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
160 false),
161 HOWTO(R_X86_64_PC32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
162 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
163 true),
164 HOWTO(R_X86_64_PLT32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
166 true),
167 HOWTO(R_X86_64_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
169 true),
170 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
172 true),
173
174 /* We have a gap in the reloc numbers here.
175 R_X86_64_standard counts the number up to this point, and
176 R_X86_64_vt_offset is the value to subtract from a reloc type of
177 R_X86_64_GNU_VT* to form an index into this table. */
178 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
179 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
180
181 /* GNU extension to record C++ vtable hierarchy. */
182 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 8, 0, false, 0, complain_overflow_dont,
183 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
184
185 /* GNU extension to record C++ vtable member usage. */
186 HOWTO (R_X86_64_GNU_VTENTRY, 0, 8, 0, false, 0, complain_overflow_dont,
187 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
188 false),
189
190 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
191 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_bitfield,
192 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
193 false)
194 };
195
196 /* Map BFD relocs to the x86_64 elf relocs. */
197 struct elf_reloc_map
198 {
199 bfd_reloc_code_real_type bfd_reloc_val;
200 unsigned char elf_reloc_val;
201 };
202
203 static const struct elf_reloc_map x86_64_reloc_map[] =
204 {
205 { BFD_RELOC_NONE, R_X86_64_NONE, },
206 { BFD_RELOC_64, R_X86_64_64, },
207 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
208 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
209 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
210 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
211 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
212 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
213 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
214 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
215 { BFD_RELOC_32, R_X86_64_32, },
216 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
217 { BFD_RELOC_16, R_X86_64_16, },
218 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
219 { BFD_RELOC_8, R_X86_64_8, },
220 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
221 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
222 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
223 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
224 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
225 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
226 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
227 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
228 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
229 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
230 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
231 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
232 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
233 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
234 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
235 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
236 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
237 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
238 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
239 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
240 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
241 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
242 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
243 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
244 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
245 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
246 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
247 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
248 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
249 };
250
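/* Map an ELF relocation number to its entry in the howto table above.
   For non-64-bit (x32) objects, R_X86_64_32 is mapped to the extra
   entry at the end of the table, which uses complain_overflow_bitfield
   instead of complain_overflow_unsigned.  */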
251 static reloc_howto_type *
252 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
253 {
254 unsigned i;
255
256 if (r_type == (unsigned int) R_X86_64_32)
257 {
258 if (ABI_64_P (abfd))
259 i = r_type;
260 else
261 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
262 }
263 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
264 || r_type >= (unsigned int) R_X86_64_max)
265 {
266 if (r_type >= (unsigned int) R_X86_64_standard)
267 {
268 /* xgettext:c-format */
269 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
270 abfd, r_type);
271 bfd_set_error (bfd_error_bad_value);
272 return NULL;
273 }
274 i = r_type;
275 }
276 else
277 i = r_type - (unsigned int) R_X86_64_vt_offset;
278 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
279 return &x86_64_elf_howto_table[i];
280 }
281
282 /* Given a BFD reloc type, return a HOWTO structure. */
283 static reloc_howto_type *
284 elf_x86_64_reloc_type_lookup (bfd *abfd,
285 bfd_reloc_code_real_type code)
286 {
287 unsigned int i;
288
289 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
290 i++)
291 {
292 if (x86_64_reloc_map[i].bfd_reloc_val == code)
293 return elf_x86_64_rtype_to_howto (abfd,
294 x86_64_reloc_map[i].elf_reloc_val);
295 }
296 return NULL;
297 }
298
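/* Given a relocation name, return the matching HOWTO structure,
   preferring the x32 variant of R_X86_64_32 for non-64-bit objects.  */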
299 static reloc_howto_type *
300 elf_x86_64_reloc_name_lookup (bfd *abfd,
301 const char *r_name)
302 {
303 unsigned int i;
304
305 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
306 {
307 /* Get x32 R_X86_64_32. */
308 reloc_howto_type *reloc
309 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
310 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
311 return reloc;
312 }
313
314 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
315 if (x86_64_elf_howto_table[i].name != NULL
316 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
317 return &x86_64_elf_howto_table[i];
318
319 return NULL;
320 }
321
322 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
323
324 static bool
325 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
326 Elf_Internal_Rela *dst)
327 {
328 unsigned r_type;
329
330 r_type = ELF32_R_TYPE (dst->r_info);
331 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
332 if (cache_ptr->howto == NULL)
333 return false;
334 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
335 return true;
336 }
337 \f
338 /* Support for core dump NOTE sections. */
339 static bool
340 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
341 {
342 int offset;
343 size_t size;
344
345 switch (note->descsz)
346 {
347 default:
348 return false;
349
350 case 296: /* sizeof (struct elf_prstatus) on Linux/x32 */
351 /* pr_cursig */
352 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
353
354 /* pr_pid */
355 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
356
357 /* pr_reg */
358 offset = 72;
359 size = 216;
360
361 break;
362
363 case 336: /* sizeof (struct elf_prstatus) on Linux/x86_64 */
364 /* pr_cursig */
365 elf_tdata (abfd)->core->signal
366 = bfd_get_16 (abfd, note->descdata + 12);
367
368 /* pr_pid */
369 elf_tdata (abfd)->core->lwpid
370 = bfd_get_32 (abfd, note->descdata + 32);
371
372 /* pr_reg */
373 offset = 112;
374 size = 216;
375
376 break;
377 }
378
379 /* Make a ".reg/999" section. */
380 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
381 size, note->descpos + offset);
382 }
383
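/* Parse an NT_PRPSINFO note.  The 124- and 128-byte layouts are the
   32-bit prpsinfo formats used by x32 core files, while 136 bytes is
   the 64-bit layout; recognizing all three is what lets consumers such
   as GDB print the "Core was generated by ..." command line for x32
   core files as well.  */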
384 static bool
385 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
386 {
387 switch (note->descsz)
388 {
389 default:
390 return false;
391
392 case 124:
393 /* sizeof (struct elf_external_linux_prpsinfo32_ugid16). */
394 elf_tdata (abfd)->core->pid
395 = bfd_get_32 (abfd, note->descdata + 12);
396 elf_tdata (abfd)->core->program
397 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
398 elf_tdata (abfd)->core->command
399 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
400 break;
401
402 case 128:
403 /* sizeof (struct elf_external_linux_prpsinfo32_ugid32). */
404 elf_tdata (abfd)->core->pid
405 = bfd_get_32 (abfd, note->descdata + 12);
406 elf_tdata (abfd)->core->program
407 = _bfd_elfcore_strndup (abfd, note->descdata + 32, 16);
408 elf_tdata (abfd)->core->command
409 = _bfd_elfcore_strndup (abfd, note->descdata + 48, 80);
410 break;
411
412 case 136:
413 /* sizeof (struct elf_prpsinfo) on Linux/x86_64. */
414 elf_tdata (abfd)->core->pid
415 = bfd_get_32 (abfd, note->descdata + 24);
416 elf_tdata (abfd)->core->program
417 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
418 elf_tdata (abfd)->core->command
419 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
420 }
421
422 /* Note that for some reason, at least one implementation tacks a
423 spurious space onto the end of the args, so strip it off if it
424 exists. */
425
426 {
427 char *command = elf_tdata (abfd)->core->command;
428 int n = strlen (command);
429
430 if (0 < n && command[n - 1] == ' ')
431 command[n - 1] = '\0';
432 }
433
434 return true;
435 }
436
437 #ifdef CORE_HEADER
438 # if GCC_VERSION >= 8000
439 # pragma GCC diagnostic push
440 # pragma GCC diagnostic ignored "-Wstringop-truncation"
441 # endif
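/* Write PRPSINFO and PRSTATUS notes for core files.  ELFCLASS32
   output uses the 32-bit layouts; x32 is distinguished from i386 by
   its EM_X86_64 machine code and uses prstatusx32_t for PRSTATUS.  */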
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 # if GCC_VERSION >= 8000
528 # pragma GCC diagnostic pop
529 # endif
530 #endif
531 \f
532 /* Functions for the x86-64 ELF linker. */
533
534 /* The size in bytes of an entry in the global offset table. */
535
536 #define GOT_ENTRY_SIZE 8
537
538 /* The size in bytes of an entry in the lazy procedure linkage table. */
539
540 #define LAZY_PLT_ENTRY_SIZE 16
541
542 /* The size in bytes of an entry in the non-lazy procedure linkage
543 table. */
544
545 #define NON_LAZY_PLT_ENTRY_SIZE 8
546
547 /* The first entry in a lazy procedure linkage table looks like this.
548 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
549 works. */
550
551 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
552 {
553 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
554 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
555 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
556 };
557
558 /* Subsequent entries in a lazy procedure linkage table look like this. */
559
560 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
561 {
562 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
563 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
564 0x68, /* pushq immediate */
565 0, 0, 0, 0, /* replaced with index into relocation table. */
566 0xe9, /* jmp relative */
567 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
568 };
569
570 /* The first entry in a lazy procedure linkage table with BND prefix
571 looks like this. */
572
573 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
574 {
575 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
576 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
577 0x0f, 0x1f, 0 /* nopl (%rax) */
578 };
579
580 /* Subsequent entries for branches with BND prefix in a lazy procedure
581 linkage table look like this. */
582
583 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
584 {
585 0x68, 0, 0, 0, 0, /* pushq immediate */
586 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
587 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
588 };
589
590 /* The first entry in the IBT-enabled lazy procedure linkage table is
591 the same as the lazy PLT with BND prefix so that bound registers are
592 preserved when control is passed to the dynamic linker. Subsequent
593 entries for an IBT-enabled lazy procedure linkage table look like
594 this. */
595
596 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
597 {
598 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
599 0x68, 0, 0, 0, 0, /* pushq immediate */
600 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
601 0x90 /* nop */
602 };
603
604 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
605 is the same as the normal lazy PLT. Subsequent entries for an
606 x32 IBT-enabled lazy procedure linkage table look like this. */
607
608 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
609 {
610 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
611 0x68, 0, 0, 0, 0, /* pushq immediate */
612 0xe9, 0, 0, 0, 0, /* jmpq relative */
613 0x66, 0x90 /* xchg %ax,%ax */
614 };
615
616 /* Entries in the non-lazy procedure linkage table look like this. */
617
618 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
619 {
620 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
621 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
622 0x66, 0x90 /* xchg %ax,%ax */
623 };
624
625 /* Entries for branches with BND prefix in the non-lazy procedure
626 linkage table look like this. */
627
628 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
629 {
630 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
631 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
632 0x90 /* nop */
633 };
634
635 /* Entries for branches with IBT enabled in the non-lazy procedure
636 linkage table look like this. They have the same size as the lazy
637 PLT entry. */
638
639 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
640 {
641 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
642 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
643 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
644 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
645 };
646
647 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
648 linkage table look like this. They have the same size as the lazy
649 PLT entry. */
650
651 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
652 {
653 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
654 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
655 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
656 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
657 };
658
659 /* The TLSDESC entry in a lazy procedure linkage table. */
660 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
661 {
662 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
663 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
664 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
665 };
666
667 /* .eh_frame covering the lazy .plt section. */
668
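/* The FDE below describes the lazy PLT entries with a single DWARF
   expression: the CFA is %rsp + 8, plus another 8 once %rip modulo 16
   reaches 11, i.e. after the pushq in the entry has grown the stack.  */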
669 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
670 {
671 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
672 0, 0, 0, 0, /* CIE ID */
673 1, /* CIE version */
674 'z', 'R', 0, /* Augmentation string */
675 1, /* Code alignment factor */
676 0x78, /* Data alignment factor */
677 16, /* Return address column */
678 1, /* Augmentation size */
679 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
680 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
681 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
682 DW_CFA_nop, DW_CFA_nop,
683
684 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
685 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
686 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
687 0, 0, 0, 0, /* .plt size goes here */
688 0, /* Augmentation size */
689 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
690 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
691 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
692 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
693 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
694 11, /* Block length */
695 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
696 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
697 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
698 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
699 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
700 };
701
702 /* .eh_frame covering the lazy BND .plt section. */
703
704 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
705 {
706 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
707 0, 0, 0, 0, /* CIE ID */
708 1, /* CIE version */
709 'z', 'R', 0, /* Augmentation string */
710 1, /* Code alignment factor */
711 0x78, /* Data alignment factor */
712 16, /* Return address column */
713 1, /* Augmentation size */
714 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
715 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
716 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
717 DW_CFA_nop, DW_CFA_nop,
718
719 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
720 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
721 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
722 0, 0, 0, 0, /* .plt size goes here */
723 0, /* Augmentation size */
724 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
725 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
726 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
727 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
728 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
729 11, /* Block length */
730 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
731 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
732 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
733 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
734 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
735 };
736
737 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
738
739 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
740 {
741 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
742 0, 0, 0, 0, /* CIE ID */
743 1, /* CIE version */
744 'z', 'R', 0, /* Augmentation string */
745 1, /* Code alignment factor */
746 0x78, /* Data alignment factor */
747 16, /* Return address column */
748 1, /* Augmentation size */
749 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
750 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
751 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
752 DW_CFA_nop, DW_CFA_nop,
753
754 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
755 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
756 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
757 0, 0, 0, 0, /* .plt size goes here */
758 0, /* Augmentation size */
759 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
760 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
761 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
762 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
763 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
764 11, /* Block length */
765 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
766 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
767 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
768 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
769 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
770 };
771
772 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
773
774 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
775 {
776 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
777 0, 0, 0, 0, /* CIE ID */
778 1, /* CIE version */
779 'z', 'R', 0, /* Augmentation string */
780 1, /* Code alignment factor */
781 0x78, /* Data alignment factor */
782 16, /* Return address column */
783 1, /* Augmentation size */
784 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
785 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
786 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
787 DW_CFA_nop, DW_CFA_nop,
788
789 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
790 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
791 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
792 0, 0, 0, 0, /* .plt size goes here */
793 0, /* Augmentation size */
794 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
795 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
796 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
797 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
798 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
799 11, /* Block length */
800 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
801 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
802 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
803 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
804 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
805 };
806
807 /* .eh_frame covering the non-lazy .plt section. */
808
809 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
810 {
811 #define PLT_GOT_FDE_LENGTH 20
812 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
813 0, 0, 0, 0, /* CIE ID */
814 1, /* CIE version */
815 'z', 'R', 0, /* Augmentation string */
816 1, /* Code alignment factor */
817 0x78, /* Data alignment factor */
818 16, /* Return address column */
819 1, /* Augmentation size */
820 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
821 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
822 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
823 DW_CFA_nop, DW_CFA_nop,
824
825 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
826 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
827 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
828 0, 0, 0, 0, /* non-lazy .plt size goes here */
829 0, /* Augmentation size */
830 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
831 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
832 };
833
834 static const sframe_frame_row_entry elf_x86_64_sframe_null_fre =
835 {
836 0,
837 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
838 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
839 };
840
841 /* .sframe FRE covering the .plt section entry. */
842 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre1 =
843 {
844 0, /* SFrame FRE start address. */
845 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
846 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
847 };
848
849 /* .sframe FRE covering the .plt section entry. */
850 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre2 =
851 {
852 6, /* SFrame FRE start address. */
853 {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
854 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
855 };
856
857 /* .sframe FRE covering the .plt section entry. */
858 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre1 =
859 {
860 0, /* SFrame FRE start address. */
861 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
862 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
863 };
864
865 /* .sframe FRE covering the .plt section entry. */
866 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre2 =
867 {
868 11, /* SFrame FRE start address. */
869 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
870 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
871 };
872
873 /* .sframe FRE covering the second .plt section entry. */
874 static const sframe_frame_row_entry elf_x86_64_sframe_sec_pltn_fre1 =
875 {
876 0, /* SFrame FRE start address. */
877 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
878 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
879 };
880
881 /* SFrame helper object for non-lazy PLT. Also used for IBT enabled PLT. */
882 static const struct elf_x86_sframe_plt elf_x86_64_sframe_non_lazy_plt =
883 {
884 LAZY_PLT_ENTRY_SIZE,
885 2, /* Number of FREs for PLT0. */
886 /* Array of SFrame FREs for plt0. */
887 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
888 LAZY_PLT_ENTRY_SIZE,
889 1, /* Number of FREs for PLTn. */
890 /* Array of SFrame FREs for plt. */
891 { &elf_x86_64_sframe_sec_pltn_fre1, &elf_x86_64_sframe_null_fre },
892 0,
893 0, /* There is no second PLT necessary. */
894 { &elf_x86_64_sframe_null_fre }
895 };
896
897 /* SFrame helper object for lazy PLT. Also used for IBT enabled PLT. */
898 static const struct elf_x86_sframe_plt elf_x86_64_sframe_plt =
899 {
900 LAZY_PLT_ENTRY_SIZE,
901 2, /* Number of FREs for PLT0. */
902 /* Array of SFrame FREs for plt0. */
903 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
904 LAZY_PLT_ENTRY_SIZE,
905 2, /* Number of FREs for PLTn. */
906 /* Array of SFrame FREs for plt. */
907 { &elf_x86_64_sframe_pltn_fre1, &elf_x86_64_sframe_pltn_fre2 },
908 NON_LAZY_PLT_ENTRY_SIZE,
909 1, /* Number of FREs for PLTn for second PLT. */
910 /* FREs for second plt (stack trace info for .plt.got is
911 identical). Used when IBT or non-lazy PLT is in effect. */
912 { &elf_x86_64_sframe_sec_pltn_fre1 }
913 };
914
915 /* These are the standard parameters. */
916 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
917 {
918 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
919 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
920 elf_x86_64_lazy_plt_entry, /* plt_entry */
921 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
922 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
923 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
924 6, /* plt_tlsdesc_got1_offset */
925 12, /* plt_tlsdesc_got2_offset */
926 10, /* plt_tlsdesc_got1_insn_end */
927 16, /* plt_tlsdesc_got2_insn_end */
928 2, /* plt0_got1_offset */
929 8, /* plt0_got2_offset */
930 12, /* plt0_got2_insn_end */
931 2, /* plt_got_offset */
932 7, /* plt_reloc_offset */
933 12, /* plt_plt_offset */
934 6, /* plt_got_insn_size */
935 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
936 6, /* plt_lazy_offset */
937 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
938 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
939 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
940 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
941 };
942
943 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
944 {
945 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
946 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
947 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 2, /* plt_got_offset */
949 6, /* plt_got_insn_size */
950 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
951 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
952 };
953
954 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
955 {
956 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
957 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
958 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
959 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
960 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
961 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
962 6, /* plt_tlsdesc_got1_offset */
963 12, /* plt_tlsdesc_got2_offset */
964 10, /* plt_tlsdesc_got1_insn_end */
965 16, /* plt_tlsdesc_got2_insn_end */
966 2, /* plt0_got1_offset */
967 1+8, /* plt0_got2_offset */
968 1+12, /* plt0_got2_insn_end */
969 1+2, /* plt_got_offset */
970 1, /* plt_reloc_offset */
971 7, /* plt_plt_offset */
972 1+6, /* plt_got_insn_size */
973 11, /* plt_plt_insn_end */
974 0, /* plt_lazy_offset */
975 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
976 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
977 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
978 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
979 };
980
981 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
982 {
983 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
984 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
985 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
986 1+2, /* plt_got_offset */
987 1+6, /* plt_got_insn_size */
988 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
989 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
990 };
991
992 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
993 {
994 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
995 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
996 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
997 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
998 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
999 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
1000 6, /* plt_tlsdesc_got1_offset */
1001 12, /* plt_tlsdesc_got2_offset */
1002 10, /* plt_tlsdesc_got1_insn_end */
1003 16, /* plt_tlsdesc_got2_insn_end */
1004 2, /* plt0_got1_offset */
1005 1+8, /* plt0_got2_offset */
1006 1+12, /* plt0_got2_insn_end */
1007 4+1+2, /* plt_got_offset */
1008 4+1, /* plt_reloc_offset */
1009 4+1+6, /* plt_plt_offset */
1010 4+1+6, /* plt_got_insn_size */
1011 4+1+5+5, /* plt_plt_insn_end */
1012 0, /* plt_lazy_offset */
1013 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
1014 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
1015 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
1016 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
1017 };
1018
1019 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
1020 {
1021 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
1022 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
1023 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
1024 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1025 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
1026 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
1027 6, /* plt_tlsdesc_got1_offset */
1028 12, /* plt_tlsdesc_got2_offset */
1029 10, /* plt_tlsdesc_got1_insn_end */
1030 16, /* plt_tlsdesc_got2_insn_end */
1031 2, /* plt0_got1_offset */
1032 8, /* plt0_got2_offset */
1033 12, /* plt0_got2_insn_end */
1034 4+2, /* plt_got_offset */
1035 4+1, /* plt_reloc_offset */
1036 4+6, /* plt_plt_offset */
1037 4+6, /* plt_got_insn_size */
1038 4+5+5, /* plt_plt_insn_end */
1039 0, /* plt_lazy_offset */
1040 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
1041 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
1042 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
1043 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
1044 };
1045
1046 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
1047 {
1048 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
1049 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
1050 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1051 4+1+2, /* plt_got_offset */
1052 4+1+6, /* plt_got_insn_size */
1053 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
1054 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
1055 };
1056
1057 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
1058 {
1059 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
1060 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
1061 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1062 4+2, /* plt_got_offset */
1063 4+6, /* plt_got_insn_size */
1064 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
1065 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
1066 };
1067
1068 static bool
1069 elf64_x86_64_elf_object_p (bfd *abfd)
1070 {
1071 /* Set the right machine number for an x86-64 elf64 file. */
1072 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1073 return true;
1074 }
1075
1076 static bool
1077 elf32_x86_64_elf_object_p (bfd *abfd)
1078 {
1079 /* Set the right machine number for an x86-64 elf32 file. */
1080 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1081 return true;
1082 }
1083
1084 /* Return TRUE if the TLS access code sequence supports transition
1085 from R_TYPE. */
1086
1087 static bool
1088 elf_x86_64_check_tls_transition (bfd *abfd,
1089 struct bfd_link_info *info,
1090 asection *sec,
1091 bfd_byte *contents,
1092 Elf_Internal_Shdr *symtab_hdr,
1093 struct elf_link_hash_entry **sym_hashes,
1094 unsigned int r_type,
1095 const Elf_Internal_Rela *rel,
1096 const Elf_Internal_Rela *relend)
1097 {
1098 unsigned int val;
1099 unsigned long r_symndx;
1100 bool largepic = false;
1101 struct elf_link_hash_entry *h;
1102 bfd_vma offset;
1103 struct elf_x86_link_hash_table *htab;
1104 bfd_byte *call;
1105 bool indirect_call;
1106
1107 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1108 offset = rel->r_offset;
1109 switch (r_type)
1110 {
1111 case R_X86_64_TLSGD:
1112 case R_X86_64_TLSLD:
1113 if ((rel + 1) >= relend)
1114 return false;
1115
1116 if (r_type == R_X86_64_TLSGD)
1117 {
1118 /* Check transition from GD access model. For 64bit, only
1119 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1120 .word 0x6666; rex64; call __tls_get_addr@PLT
1121 or
1122 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1123 .byte 0x66; rex64
1124 call *__tls_get_addr@GOTPCREL(%rip)
1125 which may be converted to
1126 addr32 call __tls_get_addr
1127 can transition to a different access model. For 32bit, only
1128 leaq foo@tlsgd(%rip), %rdi
1129 .word 0x6666; rex64; call __tls_get_addr@PLT
1130 or
1131 leaq foo@tlsgd(%rip), %rdi
1132 .byte 0x66; rex64
1133 call *__tls_get_addr@GOTPCREL(%rip)
1134 which may be converted to
1135 addr32 call __tls_get_addr
1136 can transition to a different access model. For largepic,
1137 we also support:
1138 leaq foo@tlsgd(%rip), %rdi
1139 movabsq $__tls_get_addr@pltoff, %rax
1140 addq %r15, %rax
1141 call *%rax
1142 or
1143 leaq foo@tlsgd(%rip), %rdi
1144 movabsq $__tls_get_addr@pltoff, %rax
1145 addq %rbx, %rax
1146 call *%rax */
1147
1148 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1149
1150 if ((offset + 12) > sec->size)
1151 return false;
1152
1153 call = contents + offset + 4;
1154 if (call[0] != 0x66
1155 || !((call[1] == 0x48
1156 && call[2] == 0xff
1157 && call[3] == 0x15)
1158 || (call[1] == 0x48
1159 && call[2] == 0x67
1160 && call[3] == 0xe8)
1161 || (call[1] == 0x66
1162 && call[2] == 0x48
1163 && call[3] == 0xe8)))
1164 {
1165 if (!ABI_64_P (abfd)
1166 || (offset + 19) > sec->size
1167 || offset < 3
1168 || memcmp (call - 7, leaq + 1, 3) != 0
1169 || memcmp (call, "\x48\xb8", 2) != 0
1170 || call[11] != 0x01
1171 || call[13] != 0xff
1172 || call[14] != 0xd0
1173 || !((call[10] == 0x48 && call[12] == 0xd8)
1174 || (call[10] == 0x4c && call[12] == 0xf8)))
1175 return false;
1176 largepic = true;
1177 }
1178 else if (ABI_64_P (abfd))
1179 {
1180 if (offset < 4
1181 || memcmp (contents + offset - 4, leaq, 4) != 0)
1182 return false;
1183 }
1184 else
1185 {
1186 if (offset < 3
1187 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1188 return false;
1189 }
1190 indirect_call = call[2] == 0xff;
1191 }
1192 else
1193 {
1194 /* Check transition from LD access model. Only
1195 leaq foo@tlsld(%rip), %rdi;
1196 call __tls_get_addr@PLT
1197 or
1198 leaq foo@tlsld(%rip), %rdi;
1199 call *__tls_get_addr@GOTPCREL(%rip)
1200 which may be converted to
1201 addr32 call __tls_get_addr
1202 can transition to a different access model. For largepic
1203 we also support:
1204 leaq foo@tlsld(%rip), %rdi
1205 movabsq $__tls_get_addr@pltoff, %rax
1206 addq %r15, %rax
1207 call *%rax
1208 or
1209 leaq foo@tlsld(%rip), %rdi
1210 movabsq $__tls_get_addr@pltoff, %rax
1211 addq %rbx, %rax
1212 call *%rax */
1213
1214 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1215
1216 if (offset < 3 || (offset + 9) > sec->size)
1217 return false;
1218
1219 if (memcmp (contents + offset - 3, lea, 3) != 0)
1220 return false;
1221
1222 call = contents + offset + 4;
1223 if (!(call[0] == 0xe8
1224 || (call[0] == 0xff && call[1] == 0x15)
1225 || (call[0] == 0x67 && call[1] == 0xe8)))
1226 {
1227 if (!ABI_64_P (abfd)
1228 || (offset + 19) > sec->size
1229 || memcmp (call, "\x48\xb8", 2) != 0
1230 || call[11] != 0x01
1231 || call[13] != 0xff
1232 || call[14] != 0xd0
1233 || !((call[10] == 0x48 && call[12] == 0xd8)
1234 || (call[10] == 0x4c && call[12] == 0xf8)))
1235 return false;
1236 largepic = true;
1237 }
1238 indirect_call = call[0] == 0xff;
1239 }
1240
1241 r_symndx = htab->r_sym (rel[1].r_info);
1242 if (r_symndx < symtab_hdr->sh_info)
1243 return false;
1244
1245 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1246 if (h == NULL
1247 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1248 return false;
1249 else
1250 {
1251 r_type = (ELF32_R_TYPE (rel[1].r_info)
1252 & ~R_X86_64_converted_reloc_bit);
1253 if (largepic)
1254 return r_type == R_X86_64_PLTOFF64;
1255 else if (indirect_call)
1256 return r_type == R_X86_64_GOTPCRELX;
1257 else
1258 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1259 }
1260
1261 case R_X86_64_GOTTPOFF:
1262 /* Check transition from IE access model:
1263 mov foo@gottpoff(%rip), %reg
1264 add foo@gottpoff(%rip), %reg
1265 */
1266
1267 /* Check REX prefix first. */
1268 if (offset >= 3 && (offset + 4) <= sec->size)
1269 {
1270 val = bfd_get_8 (abfd, contents + offset - 3);
1271 if (val != 0x48 && val != 0x4c)
1272 {
1273 /* X32 may have 0x44 REX prefix or no REX prefix. */
1274 if (ABI_64_P (abfd))
1275 return false;
1276 }
1277 }
1278 else
1279 {
1280 /* X32 may not have any REX prefix. */
1281 if (ABI_64_P (abfd))
1282 return false;
1283 if (offset < 2 || (offset + 3) > sec->size)
1284 return false;
1285 }
1286
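/* Opcode 0x8b is "mov" and 0x03 is "add"; anything else is not an IE
   access sequence that can be transitioned.  */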
1287 val = bfd_get_8 (abfd, contents + offset - 2);
1288 if (val != 0x8b && val != 0x03)
1289 return false;
1290
1291 val = bfd_get_8 (abfd, contents + offset - 1);
1292 return (val & 0xc7) == 5;
1293
1294 case R_X86_64_GOTPC32_TLSDESC:
1295 /* Check transition from GDesc access model:
1296 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1297 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1298
1299 Make sure it's a leaq adding rip to a 32-bit offset
1300 into any register, although it's probably almost always
1301 going to be rax. */
1302
1303 if (offset < 3 || (offset + 4) > sec->size)
1304 return false;
1305
1306 val = bfd_get_8 (abfd, contents + offset - 3);
1307 val &= 0xfb;
1308 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1309 return false;
1310
1311 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1312 return false;
1313
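/* A ModRM byte with mod == 00 and r/m == 101 selects RIP-relative
   addressing, which is what x@tlsdesc(%rip) must use.  */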
1314 val = bfd_get_8 (abfd, contents + offset - 1);
1315 return (val & 0xc7) == 0x05;
1316
1317 case R_X86_64_TLSDESC_CALL:
1318 /* Check transition from GDesc access model:
1319 call *x@tlsdesc(%rax) <--- LP64 mode.
1320 call *x@tlsdesc(%eax) <--- X32 mode.
1321 */
1322 if (offset + 2 <= sec->size)
1323 {
1324 unsigned int prefix;
1325 call = contents + offset;
1326 prefix = 0;
1327 if (!ABI_64_P (abfd))
1328 {
1329 /* Check for call *x@tlsdesc(%eax). */
1330 if (call[0] == 0x67)
1331 {
1332 prefix = 1;
1333 if (offset + 3 > sec->size)
1334 return false;
1335 }
1336 }
1337 /* Make sure that it's a call *x@tlsdesc(%rax). */
1338 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1339 }
1340
1341 return false;
1342
1343 default:
1344 abort ();
1345 }
1346 }
1347
1348 /* Return TRUE if the TLS access transition is OK or no transition
1349 will be performed. Update R_TYPE if there is a transition. */
1350
1351 static bool
1352 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1353 asection *sec, bfd_byte *contents,
1354 Elf_Internal_Shdr *symtab_hdr,
1355 struct elf_link_hash_entry **sym_hashes,
1356 unsigned int *r_type, int tls_type,
1357 const Elf_Internal_Rela *rel,
1358 const Elf_Internal_Rela *relend,
1359 struct elf_link_hash_entry *h,
1360 unsigned long r_symndx,
1361 bool from_relocate_section)
1362 {
1363 unsigned int from_type = *r_type;
1364 unsigned int to_type = from_type;
1365 bool check = true;
1366
1367 /* Skip TLS transition for functions. */
1368 if (h != NULL
1369 && (h->type == STT_FUNC
1370 || h->type == STT_GNU_IFUNC))
1371 return true;
1372
1373 switch (from_type)
1374 {
1375 case R_X86_64_TLSGD:
1376 case R_X86_64_GOTPC32_TLSDESC:
1377 case R_X86_64_TLSDESC_CALL:
1378 case R_X86_64_GOTTPOFF:
1379 if (bfd_link_executable (info))
1380 {
1381 if (h == NULL)
1382 to_type = R_X86_64_TPOFF32;
1383 else
1384 to_type = R_X86_64_GOTTPOFF;
1385 }
1386
1387 /* When we are called from elf_x86_64_relocate_section, there may
1388 be additional transitions based on TLS_TYPE. */
1389 if (from_relocate_section)
1390 {
1391 unsigned int new_to_type = to_type;
1392
1393 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1394 new_to_type = R_X86_64_TPOFF32;
1395
1396 if (to_type == R_X86_64_TLSGD
1397 || to_type == R_X86_64_GOTPC32_TLSDESC
1398 || to_type == R_X86_64_TLSDESC_CALL)
1399 {
1400 if (tls_type == GOT_TLS_IE)
1401 new_to_type = R_X86_64_GOTTPOFF;
1402 }
1403
1404 /* We checked the transition before when we were called from
1405 elf_x86_64_scan_relocs. We only want to check the new
1406 transition which hasn't been checked before. */
1407 check = new_to_type != to_type && from_type == to_type;
1408 to_type = new_to_type;
1409 }
1410
1411 break;
1412
1413 case R_X86_64_TLSLD:
1414 if (bfd_link_executable (info))
1415 to_type = R_X86_64_TPOFF32;
1416 break;
1417
1418 default:
1419 return true;
1420 }
1421
1422 /* Return TRUE if there is no transition. */
1423 if (from_type == to_type)
1424 return true;
1425
1426 /* Check if the transition can be performed. */
1427 if (check
1428 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1429 symtab_hdr, sym_hashes,
1430 from_type, rel, relend))
1431 {
1432 reloc_howto_type *from, *to;
1433 const char *name;
1434
1435 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1436 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1437
1438 if (from == NULL || to == NULL)
1439 return false;
1440
1441 if (h)
1442 name = h->root.root.string;
1443 else
1444 {
1445 struct elf_x86_link_hash_table *htab;
1446
1447 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1448 if (htab == NULL)
1449 name = "*unknown*";
1450 else
1451 {
1452 Elf_Internal_Sym *isym;
1453
1454 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1455 abfd, r_symndx);
1456 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1457 }
1458 }
1459
1460 _bfd_error_handler
1461 /* xgettext:c-format */
1462 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1463 " in section `%pA' failed"),
1464 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1465 bfd_set_error (bfd_error_bad_value);
1466 return false;
1467 }
1468
1469 *r_type = to_type;
1470 return true;
1471 }
1472
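/* Issue an error for a relocation that cannot be used when making a
   shared object, PIE or PDE, suggest a recompile option, and mark the
   section as having failed check_relocs.  Returns FALSE.  */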
1473 static bool
1474 elf_x86_64_need_pic (struct bfd_link_info *info,
1475 bfd *input_bfd, asection *sec,
1476 struct elf_link_hash_entry *h,
1477 Elf_Internal_Shdr *symtab_hdr,
1478 Elf_Internal_Sym *isym,
1479 reloc_howto_type *howto)
1480 {
1481 const char *v = "";
1482 const char *und = "";
1483 const char *pic = "";
1484 const char *object;
1485
1486 const char *name;
1487 if (h)
1488 {
1489 name = h->root.root.string;
1490 switch (ELF_ST_VISIBILITY (h->other))
1491 {
1492 case STV_HIDDEN:
1493 v = _("hidden symbol ");
1494 break;
1495 case STV_INTERNAL:
1496 v = _("internal symbol ");
1497 break;
1498 case STV_PROTECTED:
1499 v = _("protected symbol ");
1500 break;
1501 default:
1502 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1503 v = _("protected symbol ");
1504 else
1505 v = _("symbol ");
1506 pic = NULL;
1507 break;
1508 }
1509
1510 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1511 und = _("undefined ");
1512 }
1513 else
1514 {
1515 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1516 pic = NULL;
1517 }
1518
1519 if (bfd_link_dll (info))
1520 {
1521 object = _("a shared object");
1522 if (!pic)
1523 pic = _("; recompile with -fPIC");
1524 }
1525 else
1526 {
1527 if (bfd_link_pie (info))
1528 object = _("a PIE object");
1529 else
1530 object = _("a PDE object");
1531 if (!pic)
1532 pic = _("; recompile with -fPIE");
1533 }
1534
1535 /* xgettext:c-format */
1536 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1537 "not be used when making %s%s"),
1538 input_bfd, howto->name, und, v, name,
1539 object, pic);
1540 bfd_set_error (bfd_error_bad_value);
1541 sec->check_relocs_failed = 1;
1542 return false;
1543 }
1544
1545 /* With the local symbol, foo, we convert
1546 mov foo@GOTPCREL(%rip), %reg
1547 to
1548 lea foo(%rip), %reg
1549 and convert
1550 call/jmp *foo@GOTPCREL(%rip)
1551 to
1552 nop call foo/jmp foo nop
1553 When PIC is false, convert
1554 test %reg, foo@GOTPCREL(%rip)
1555 to
1556 test $foo, %reg
1557 and convert
1558 binop foo@GOTPCREL(%rip), %reg
1559 to
1560 binop $foo, %reg
1561 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1562 instructions. */
1563
1564 static bool
1565 elf_x86_64_convert_load_reloc (bfd *abfd,
1566 bfd_byte *contents,
1567 unsigned int *r_type_p,
1568 Elf_Internal_Rela *irel,
1569 struct elf_link_hash_entry *h,
1570 bool *converted,
1571 struct bfd_link_info *link_info)
1572 {
1573 struct elf_x86_link_hash_table *htab;
1574 bool is_pic;
1575 bool no_overflow;
1576 bool relocx;
1577 bool to_reloc_pc32;
1578 bool abs_symbol;
1579 bool local_ref;
1580 asection *tsec;
1581 bfd_signed_vma raddend;
1582 unsigned int opcode;
1583 unsigned int modrm;
1584 unsigned int r_type = *r_type_p;
1585 unsigned int r_symndx;
1586 bfd_vma roff = irel->r_offset;
1587 bfd_vma abs_relocation;
1588
1589 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1590 return true;
1591
1592 raddend = irel->r_addend;
1593 /* Addend for 32-bit PC-relative relocation must be -4. */
1594 if (raddend != -4)
1595 return true;
1596
1597 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1598 is_pic = bfd_link_pic (link_info);
1599
1600 relocx = (r_type == R_X86_64_GOTPCRELX
1601 || r_type == R_X86_64_REX_GOTPCRELX);
1602
1603 /* TRUE if --no-relax is used. */
1604 no_overflow = link_info->disable_target_specific_optimizations > 1;
1605
1606 r_symndx = htab->r_sym (irel->r_info);
1607
1608 opcode = bfd_get_8 (abfd, contents + roff - 2);
1609
1610 /* Convert "mov" to "lea" even for plain GOTPCREL, since that conversion has been done for a while. */
1611 if (opcode != 0x8b)
1612 {
1613 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1614 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1615 test, xor instructions. */
1616 if (!relocx)
1617 return true;
1618 }
1619
1620 /* We convert only to R_X86_64_PC32:
1621 1. Branch.
1622 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1623 3. no_overflow is true.
1624 4. PIC.
1625 */
1626 to_reloc_pc32 = (opcode == 0xff
1627 || !relocx
1628 || no_overflow
1629 || is_pic);
1630
1631 abs_symbol = false;
1632 abs_relocation = 0;
1633
1634 /* Get the symbol referred to by the reloc. */
1635 if (h == NULL)
1636 {
1637 Elf_Internal_Sym *isym
1638 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);
1639
1640 /* Skip relocation against undefined symbols. */
1641 if (isym->st_shndx == SHN_UNDEF)
1642 return true;
1643
1644 local_ref = true;
1645 if (isym->st_shndx == SHN_ABS)
1646 {
1647 tsec = bfd_abs_section_ptr;
1648 abs_symbol = true;
1649 abs_relocation = isym->st_value;
1650 }
1651 else if (isym->st_shndx == SHN_COMMON)
1652 tsec = bfd_com_section_ptr;
1653 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1654 tsec = &_bfd_elf_large_com_section;
1655 else
1656 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1657 }
1658 else
1659 {
1660 /* An undefined weak symbol is only bound locally in an executable
1661 and its reference is resolved as 0 without relocation
1662 overflow. We can only perform this optimization for
1663 GOTPCRELX relocations since we need to modify the REX byte.
1664 It is OK to convert mov with R_X86_64_GOTPCREL to
1665 R_X86_64_PC32. */
1666 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1667
1668 abs_symbol = ABS_SYMBOL_P (h);
1669 abs_relocation = h->root.u.def.value;
1670
1671 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1672 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1673 if ((relocx || opcode == 0x8b)
1674 && (h->root.type == bfd_link_hash_undefweak
1675 && !eh->linker_def
1676 && local_ref))
1677 {
1678 if (opcode == 0xff)
1679 {
1680 /* Skip for branch instructions since R_X86_64_PC32
1681 may overflow. */
1682 if (no_overflow)
1683 return true;
1684 }
1685 else if (relocx)
1686 {
1687 /* For non-branch instructions, we can convert to
1688 R_X86_64_32/R_X86_64_32S since we know if there
1689 is a REX byte. */
1690 to_reloc_pc32 = false;
1691 }
1692
1693 /* Since we don't know the current PC when PIC is true,
1694 we can't convert to R_X86_64_PC32. */
1695 if (to_reloc_pc32 && is_pic)
1696 return true;
1697
1698 goto convert;
1699 }
1700 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1701 ld.so may use its link-time address. */
1702 else if (h->start_stop
1703 || eh->linker_def
1704 || ((h->def_regular
1705 || h->root.type == bfd_link_hash_defined
1706 || h->root.type == bfd_link_hash_defweak)
1707 && h != htab->elf.hdynamic
1708 && local_ref))
1709 {
1710 /* bfd_link_hash_new or bfd_link_hash_undefined is
1711 set by an assignment in a linker script in
1712 bfd_elf_record_link_assignment. start_stop is set
1713 on __start_SECNAME/__stop_SECNAME which mark section
1714 SECNAME. */
1715 if (h->start_stop
1716 || eh->linker_def
1717 || (h->def_regular
1718 && (h->root.type == bfd_link_hash_new
1719 || h->root.type == bfd_link_hash_undefined
1720 || ((h->root.type == bfd_link_hash_defined
1721 || h->root.type == bfd_link_hash_defweak)
1722 && h->root.u.def.section == bfd_und_section_ptr))))
1723 {
1724 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1725 if (no_overflow)
1726 return true;
1727 goto convert;
1728 }
1729 tsec = h->root.u.def.section;
1730 }
1731 else
1732 return true;
1733 }
1734
1735 /* Don't convert GOTPCREL relocation against large section. */
1736 if (elf_section_data (tsec) != NULL
1737 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1738 return true;
1739
1740 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1741 if (no_overflow)
1742 return true;
1743
1744 convert:
1745 if (opcode == 0xff)
1746 {
1747 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1748 unsigned int nop;
1749 unsigned int disp;
1750 bfd_vma nop_offset;
1751
1752 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1753 R_X86_64_PC32. */
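	  /* Illustrative byte view: "jmp *foo@GOTPCREL(%rip)" is the 6-byte
	     sequence ff 25 <disp32> and becomes the 5-byte "jmp foo"
	     (e9 <rel32>) followed by a one-byte nop, while
	     "call *foo@GOTPCREL(%rip)" (ff 15 <disp32>) becomes a one-byte
	     prefix/nop followed by the 5-byte "call foo" (e8 <rel32>), or
	     "call foo" followed by a nop when call_nop_as_suffix is set.  */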
1754 modrm = bfd_get_8 (abfd, contents + roff - 1);
1755 if (modrm == 0x25)
1756 {
1757 /* Convert to "jmp foo nop". */
1758 modrm = 0xe9;
1759 nop = NOP_OPCODE;
1760 nop_offset = irel->r_offset + 3;
1761 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1762 irel->r_offset -= 1;
1763 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1764 }
1765 else
1766 {
1767 struct elf_x86_link_hash_entry *eh
1768 = (struct elf_x86_link_hash_entry *) h;
1769
1770 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1771 is a nop prefix. */
1772 modrm = 0xe8;
1773 /* To support TLS optimization, always use addr32 prefix for
1774 "call *__tls_get_addr@GOTPCREL(%rip)". */
1775 if (eh && eh->tls_get_addr)
1776 {
1777 nop = 0x67;
1778 nop_offset = irel->r_offset - 2;
1779 }
1780 else
1781 {
1782 nop = htab->params->call_nop_byte;
1783 if (htab->params->call_nop_as_suffix)
1784 {
1785 nop_offset = irel->r_offset + 3;
1786 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1787 irel->r_offset -= 1;
1788 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1789 }
1790 else
1791 nop_offset = irel->r_offset - 2;
1792 }
1793 }
1794 bfd_put_8 (abfd, nop, contents + nop_offset);
1795 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1796 r_type = R_X86_64_PC32;
1797 }
1798 else
1799 {
1800 unsigned int rex;
1801 unsigned int rex_mask = REX_R;
1802
1803 if (r_type == R_X86_64_REX_GOTPCRELX)
1804 rex = bfd_get_8 (abfd, contents + roff - 3);
1805 else
1806 rex = 0;
1807
1808 if (opcode == 0x8b)
1809 {
1810 if (abs_symbol && local_ref && relocx)
1811 to_reloc_pc32 = false;
1812
1813 if (to_reloc_pc32)
1814 {
1815 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1816 "lea foo(%rip), %reg". */
1817 opcode = 0x8d;
1818 r_type = R_X86_64_PC32;
1819 }
1820 else
1821 {
1822 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1823 "mov $foo, %reg". */
1824 opcode = 0xc7;
1825 modrm = bfd_get_8 (abfd, contents + roff - 1);
1826 modrm = 0xc0 | (modrm & 0x38) >> 3;
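	      /* E.g. (illustrative) "movq foo@GOTPCREL(%rip), %rax"
		 (48 8b 05 <disp32>) becomes "movq $foo, %rax"
		 (48 c7 c0 <imm32>): the destination register moves from
		 the ModRM reg field into the r/m field of the C7 /0
		 form.  */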
1827 if ((rex & REX_W) != 0
1828 && ABI_64_P (link_info->output_bfd))
1829 {
1830 /* Keep the REX_W bit in REX byte for LP64. */
1831 r_type = R_X86_64_32S;
1832 goto rewrite_modrm_rex;
1833 }
1834 else
1835 {
1836 /* If the REX_W bit in REX byte isn't needed,
1837 use R_X86_64_32 and clear the W bit to avoid
1838 sign-extend imm32 to imm64. */
1839 r_type = R_X86_64_32;
1840 /* Clear the W bit in REX byte. */
1841 rex_mask |= REX_W;
1842 goto rewrite_modrm_rex;
1843 }
1844 }
1845 }
1846 else
1847 {
1848 /* R_X86_64_PC32 isn't supported. */
1849 if (to_reloc_pc32)
1850 return true;
1851
1852 modrm = bfd_get_8 (abfd, contents + roff - 1);
1853 if (opcode == 0x85)
1854 {
1855 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1856 "test $foo, %reg". */
1857 modrm = 0xc0 | (modrm & 0x38) >> 3;
1858 opcode = 0xf7;
1859 }
1860 else
1861 {
1862 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1863 "binop $foo, %reg". */
1864 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1865 opcode = 0x81;
1866 }
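	  /* For example (illustrative): "addq foo@GOTPCREL(%rip), %rax"
	     (48 03 05 <disp32>) becomes "addq $foo, %rax"
	     (48 81 c0 <imm32>), and "testq %rax, foo@GOTPCREL(%rip)"
	     (48 85 05 <disp32>) becomes "testq $foo, %rax"
	     (48 f7 c0 <imm32>).  The /digit of the 81-group immediate form
	     is recovered from bits 2-5 of the original opcode.  */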
1867
1868 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1869 overflow when sign-extending imm32 to imm64. */
1870 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1871
1872 rewrite_modrm_rex:
1873 if (abs_relocation)
1874 {
1875 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
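	      /* R_X86_64_32S needs a value representable as a sign-extended
		 32-bit quantity (-0x80000000 .. 0x7fffffff); R_X86_64_32
		 needs a value that fits in an unsigned 32-bit field
		 (0 .. 0xffffffff).  */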
1876 if (r_type == R_X86_64_32S)
1877 {
1878 if ((abs_relocation + 0x80000000) > 0xffffffff)
1879 return true;
1880 }
1881 else
1882 {
1883 if (abs_relocation > 0xffffffff)
1884 return true;
1885 }
1886 }
1887
1888 bfd_put_8 (abfd, modrm, contents + roff - 1);
1889
1890 if (rex)
1891 {
1892 /* Move the R bit to the B bit in REX byte. */
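	      /* The register formerly encoded in ModRM.reg (extended by
		 REX.R, bit 2) is now encoded in ModRM.rm (extended by
		 REX.B, bit 0), hence the shift right by 2.  */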
1893 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1894 bfd_put_8 (abfd, rex, contents + roff - 3);
1895 }
1896
1897 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1898 irel->r_addend = 0;
1899 }
1900
1901 bfd_put_8 (abfd, opcode, contents + roff - 2);
1902 }
1903
1904 *r_type_p = r_type;
1905 irel->r_info = htab->r_info (r_symndx,
1906 r_type | R_X86_64_converted_reloc_bit);
1907
1908 *converted = true;
1909
1910 return true;
1911 }
1912
1913 /* Look through the relocs for a section during the first phase, and
1914 calculate needed space in the global offset table, and procedure
1915 linkage table. */
1916
1917 static bool
1918 elf_x86_64_scan_relocs (bfd *abfd, struct bfd_link_info *info,
1919 asection *sec,
1920 const Elf_Internal_Rela *relocs)
1921 {
1922 struct elf_x86_link_hash_table *htab;
1923 Elf_Internal_Shdr *symtab_hdr;
1924 struct elf_link_hash_entry **sym_hashes;
1925 const Elf_Internal_Rela *rel;
1926 const Elf_Internal_Rela *rel_end;
1927 bfd_byte *contents;
1928 bool converted;
1929
1930 if (bfd_link_relocatable (info))
1931 return true;
1932
1933 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1934 if (htab == NULL)
1935 {
1936 sec->check_relocs_failed = 1;
1937 return false;
1938 }
1939
1940 BFD_ASSERT (is_x86_elf (abfd, htab));
1941
1942 /* Get the section contents. */
1943 if (elf_section_data (sec)->this_hdr.contents != NULL)
1944 contents = elf_section_data (sec)->this_hdr.contents;
1945 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1946 {
1947 sec->check_relocs_failed = 1;
1948 return false;
1949 }
1950
1951 symtab_hdr = &elf_symtab_hdr (abfd);
1952 sym_hashes = elf_sym_hashes (abfd);
1953
1954 converted = false;
1955
1956 rel_end = relocs + sec->reloc_count;
1957 for (rel = relocs; rel < rel_end; rel++)
1958 {
1959 unsigned int r_type;
1960 unsigned int r_symndx;
1961 struct elf_link_hash_entry *h;
1962 struct elf_x86_link_hash_entry *eh;
1963 Elf_Internal_Sym *isym;
1964 const char *name;
1965 bool size_reloc;
1966 bool converted_reloc;
1967 bool no_dynreloc;
1968
1969 r_symndx = htab->r_sym (rel->r_info);
1970 r_type = ELF32_R_TYPE (rel->r_info);
1971
1972 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1973 {
1974 /* xgettext:c-format */
1975 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1976 abfd, r_symndx);
1977 goto error_return;
1978 }
1979
1980 if (r_symndx < symtab_hdr->sh_info)
1981 {
1982 /* A local symbol. */
1983 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1984 abfd, r_symndx);
1985 if (isym == NULL)
1986 goto error_return;
1987
1988 /* Check relocation against local STT_GNU_IFUNC symbol. */
1989 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1990 {
1991 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1992 true);
1993 if (h == NULL)
1994 goto error_return;
1995
1996 /* Fake a STT_GNU_IFUNC symbol. */
1997 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1998 isym, NULL);
1999 h->type = STT_GNU_IFUNC;
2000 h->def_regular = 1;
2001 h->ref_regular = 1;
2002 h->forced_local = 1;
2003 h->root.type = bfd_link_hash_defined;
2004 }
2005 else
2006 h = NULL;
2007 }
2008 else
2009 {
2010 isym = NULL;
2011 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2012 while (h->root.type == bfd_link_hash_indirect
2013 || h->root.type == bfd_link_hash_warning)
2014 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2015 }
2016
2017 /* Check invalid x32 relocations. */
2018 if (!ABI_64_P (abfd))
2019 switch (r_type)
2020 {
2021 default:
2022 break;
2023
2024 case R_X86_64_DTPOFF64:
2025 case R_X86_64_TPOFF64:
2026 case R_X86_64_PC64:
2027 case R_X86_64_GOTOFF64:
2028 case R_X86_64_GOT64:
2029 case R_X86_64_GOTPCREL64:
2030 case R_X86_64_GOTPC64:
2031 case R_X86_64_GOTPLT64:
2032 case R_X86_64_PLTOFF64:
2033 {
2034 if (h)
2035 name = h->root.root.string;
2036 else
2037 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2038 NULL);
2039 _bfd_error_handler
2040 /* xgettext:c-format */
2041 (_("%pB: relocation %s against symbol `%s' isn't "
2042 "supported in x32 mode"), abfd,
2043 x86_64_elf_howto_table[r_type].name, name);
2044 bfd_set_error (bfd_error_bad_value);
2045 goto error_return;
2046 }
2047 break;
2048 }
2049
2050 eh = (struct elf_x86_link_hash_entry *) h;
2051
2052 if (h != NULL)
2053 {
2054 /* It is referenced by a non-shared object. */
2055 h->ref_regular = 1;
2056 }
2057
2058 converted_reloc = false;
2059 if ((r_type == R_X86_64_GOTPCREL
2060 || r_type == R_X86_64_GOTPCRELX
2061 || r_type == R_X86_64_REX_GOTPCRELX)
2062 && (h == NULL || h->type != STT_GNU_IFUNC))
2063 {
2064 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
2065 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
2066 irel, h, &converted_reloc,
2067 info))
2068 goto error_return;
2069
2070 if (converted_reloc)
2071 converted = true;
2072 }
2073
2074 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
2075 symtab_hdr, &no_dynreloc))
2076 return false;
2077
2078 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2079 symtab_hdr, sym_hashes,
2080 &r_type, GOT_UNKNOWN,
2081 rel, rel_end, h, r_symndx, false))
2082 goto error_return;
2083
2084 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2085 if (h == htab->elf.hgot)
2086 htab->got_referenced = true;
2087
2088 switch (r_type)
2089 {
2090 case R_X86_64_TLSLD:
2091 htab->tls_ld_or_ldm_got.refcount = 1;
2092 goto create_got;
2093
2094 case R_X86_64_TPOFF32:
2095 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2096 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2097 &x86_64_elf_howto_table[r_type]);
2098 if (eh != NULL)
2099 eh->zero_undefweak &= 0x2;
2100 break;
2101
2102 case R_X86_64_GOTTPOFF:
2103 if (!bfd_link_executable (info))
2104 info->flags |= DF_STATIC_TLS;
2105 /* Fall through */
2106
2107 case R_X86_64_GOT32:
2108 case R_X86_64_GOTPCREL:
2109 case R_X86_64_GOTPCRELX:
2110 case R_X86_64_REX_GOTPCRELX:
2111 case R_X86_64_TLSGD:
2112 case R_X86_64_GOT64:
2113 case R_X86_64_GOTPCREL64:
2114 case R_X86_64_GOTPLT64:
2115 case R_X86_64_GOTPC32_TLSDESC:
2116 case R_X86_64_TLSDESC_CALL:
2117 /* This symbol requires a global offset table entry. */
2118 {
2119 int tls_type, old_tls_type;
2120
2121 switch (r_type)
2122 {
2123 default:
2124 tls_type = GOT_NORMAL;
2125 if (h)
2126 {
2127 if (ABS_SYMBOL_P (h))
2128 tls_type = GOT_ABS;
2129 }
2130 else if (isym->st_shndx == SHN_ABS)
2131 tls_type = GOT_ABS;
2132 break;
2133 case R_X86_64_TLSGD:
2134 tls_type = GOT_TLS_GD;
2135 break;
2136 case R_X86_64_GOTTPOFF:
2137 tls_type = GOT_TLS_IE;
2138 break;
2139 case R_X86_64_GOTPC32_TLSDESC:
2140 case R_X86_64_TLSDESC_CALL:
2141 tls_type = GOT_TLS_GDESC;
2142 break;
2143 }
2144
2145 if (h != NULL)
2146 {
2147 h->got.refcount = 1;
2148 old_tls_type = eh->tls_type;
2149 }
2150 else
2151 {
2152 bfd_signed_vma *local_got_refcounts;
2153
2154 if (!elf_x86_allocate_local_got_info (abfd,
2155 symtab_hdr->sh_info))
2156 goto error_return;
2157
2158 /* This is a global offset table entry for a local symbol. */
2159 local_got_refcounts = elf_local_got_refcounts (abfd);
2160 local_got_refcounts[r_symndx] = 1;
2161 old_tls_type
2162 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2163 }
2164
2165 /* If a TLS symbol is accessed using IE at least once,
2166 there is no point in using the dynamic model for it. */
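	    /* For instance, if one object accesses foo with R_X86_64_TLSGD
	       and another with R_X86_64_GOTTPOFF, the symbol ends up with
	       GOT_TLS_IE so only the IE-style GOT entry is allocated.  */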
2167 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2168 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2169 || tls_type != GOT_TLS_IE))
2170 {
2171 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2172 tls_type = old_tls_type;
2173 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2174 && GOT_TLS_GD_ANY_P (tls_type))
2175 tls_type |= old_tls_type;
2176 else
2177 {
2178 if (h)
2179 name = h->root.root.string;
2180 else
2181 name = bfd_elf_sym_name (abfd, symtab_hdr,
2182 isym, NULL);
2183 _bfd_error_handler
2184 /* xgettext:c-format */
2185 (_("%pB: '%s' accessed both as normal and"
2186 " thread local symbol"),
2187 abfd, name);
2188 bfd_set_error (bfd_error_bad_value);
2189 goto error_return;
2190 }
2191 }
2192
2193 if (old_tls_type != tls_type)
2194 {
2195 if (eh != NULL)
2196 eh->tls_type = tls_type;
2197 else
2198 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2199 }
2200 }
2201 /* Fall through */
2202
2203 case R_X86_64_GOTOFF64:
2204 case R_X86_64_GOTPC32:
2205 case R_X86_64_GOTPC64:
2206 create_got:
2207 if (eh != NULL)
2208 eh->zero_undefweak &= 0x2;
2209 break;
2210
2211 case R_X86_64_PLT32:
2212 /* This symbol requires a procedure linkage table entry. We
2213 actually build the entry in adjust_dynamic_symbol,
2214 because this might be a case of linking PIC code which is
2215 never referenced by a dynamic object, in which case we
2216 don't need to generate a procedure linkage table entry
2217 after all. */
2218
2219 /* If this is a local symbol, we resolve it directly without
2220 creating a procedure linkage table entry. */
2221 if (h == NULL)
2222 continue;
2223
2224 eh->zero_undefweak &= 0x2;
2225 h->needs_plt = 1;
2226 h->plt.refcount = 1;
2227 break;
2228
2229 case R_X86_64_PLTOFF64:
2230 /* This tries to form the 'address' of a function relative
2231 to GOT. For global symbols we need a PLT entry. */
2232 if (h != NULL)
2233 {
2234 h->needs_plt = 1;
2235 h->plt.refcount = 1;
2236 }
2237 goto create_got;
2238
2239 case R_X86_64_SIZE32:
2240 case R_X86_64_SIZE64:
2241 size_reloc = true;
2242 goto do_size;
2243
2244 case R_X86_64_32:
2245 if (!ABI_64_P (abfd))
2246 goto pointer;
2247 /* Fall through. */
2248 case R_X86_64_8:
2249 case R_X86_64_16:
2250 case R_X86_64_32S:
2251 /* Check relocation overflow as these relocs may lead to
2252 run-time relocation overflow. Don't error out for
2253 sections we don't care about, such as debug sections or
2254 when relocation overflow check is disabled. */
2255 if (!htab->params->no_reloc_overflow_check
2256 && !converted_reloc
2257 && (bfd_link_pic (info)
2258 || (bfd_link_executable (info)
2259 && h != NULL
2260 && !h->def_regular
2261 && h->def_dynamic
2262 && (sec->flags & SEC_READONLY) == 0)))
2263 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2264 &x86_64_elf_howto_table[r_type]);
2265 /* Fall through. */
2266
2267 case R_X86_64_PC8:
2268 case R_X86_64_PC16:
2269 case R_X86_64_PC32:
2270 case R_X86_64_PC64:
2271 case R_X86_64_64:
2272 pointer:
2273 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2274 eh->zero_undefweak |= 0x2;
2275 /* We are called after all symbols have been resolved. Only a
2276 relocation against an STT_GNU_IFUNC symbol must go through the
2277 PLT. */
2278 if (h != NULL
2279 && (bfd_link_executable (info)
2280 || h->type == STT_GNU_IFUNC))
2281 {
2282 bool func_pointer_ref = false;
2283
2284 if (r_type == R_X86_64_PC32)
2285 {
2286 /* Since something like ".long foo - ." may be used
2287 as pointer, make sure that PLT is used if foo is
2288 a function defined in a shared library. */
2289 if ((sec->flags & SEC_CODE) == 0)
2290 {
2291 h->pointer_equality_needed = 1;
2292 if (bfd_link_pie (info)
2293 && h->type == STT_FUNC
2294 && !h->def_regular
2295 && h->def_dynamic)
2296 {
2297 h->needs_plt = 1;
2298 h->plt.refcount = 1;
2299 }
2300 }
2301 }
2302 else if (r_type != R_X86_64_PC64)
2303 {
2304 /* At run-time, R_X86_64_64 can be resolved for both
2305 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2306 can only be resolved for x32. Function pointer
2307 reference doesn't need PLT for pointer equality. */
2308 if ((sec->flags & SEC_READONLY) == 0
2309 && (r_type == R_X86_64_64
2310 || (!ABI_64_P (abfd)
2311 && (r_type == R_X86_64_32
2312 || r_type == R_X86_64_32S))))
2313 func_pointer_ref = true;
2314
2315 /* IFUNC symbol needs pointer equality in PDE so that
2316 function pointer reference will be resolved to its
2317 PLT entry directly. */
2318 if (!func_pointer_ref
2319 || (bfd_link_pde (info)
2320 && h->type == STT_GNU_IFUNC))
2321 h->pointer_equality_needed = 1;
2322 }
2323
2324 if (!func_pointer_ref)
2325 {
2326 /* If this reloc is in a read-only section, we might
2327 need a copy reloc. We can't check reliably at this
2328 stage whether the section is read-only, as input
2329 sections have not yet been mapped to output sections.
2330 Tentatively set the flag for now, and correct in
2331 adjust_dynamic_symbol. */
2332 h->non_got_ref = 1;
2333
2334 if (!elf_has_indirect_extern_access (sec->owner))
2335 eh->non_got_ref_without_indirect_extern_access = 1;
2336
2337 /* We may need a .plt entry if the symbol is a function
2338 defined in a shared lib or is a function referenced
2339 from the code or read-only section. */
2340 if (!h->def_regular
2341 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2342 h->plt.refcount = 1;
2343
2344 if (htab->elf.target_os != is_solaris
2345 && h->pointer_equality_needed
2346 && h->type == STT_FUNC
2347 && eh->def_protected
2348 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2349 && h->def_dynamic)
2350 {
2351 /* Disallow non-canonical reference to canonical
2352 protected function. */
2353 _bfd_error_handler
2354 /* xgettext:c-format */
2355 (_("%pB: non-canonical reference to canonical "
2356 "protected function `%s' in %pB"),
2357 abfd, h->root.root.string,
2358 h->root.u.def.section->owner);
2359 bfd_set_error (bfd_error_bad_value);
2360 goto error_return;
2361 }
2362 }
2363 }
2364
2365 size_reloc = false;
2366 do_size:
2367 if (!no_dynreloc
2368 && NEED_DYNAMIC_RELOCATION_P (true, info, true, h, sec,
2369 r_type,
2370 htab->pointer_r_type))
2371 {
2372 struct elf_dyn_relocs *p;
2373 struct elf_dyn_relocs **head;
2374
2375 /* If this is a global symbol, we count the number of
2376 relocations we need for this symbol. */
2377 if (h != NULL)
2378 head = &h->dyn_relocs;
2379 else
2380 {
2381 /* Track dynamic relocs needed for local syms too.
2382 We really need local syms available to do this
2383 easily. Oh well. */
2384 asection *s;
2385 void **vpp;
2386
2387 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2388 abfd, r_symndx);
2389 if (isym == NULL)
2390 goto error_return;
2391
2392 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2393 if (s == NULL)
2394 s = sec;
2395
2396 /* Beware of type punned pointers vs strict aliasing
2397 rules. */
2398 vpp = &(elf_section_data (s)->local_dynrel);
2399 head = (struct elf_dyn_relocs **)vpp;
2400 }
2401
2402 p = *head;
2403 if (p == NULL || p->sec != sec)
2404 {
2405 size_t amt = sizeof *p;
2406
2407 p = ((struct elf_dyn_relocs *)
2408 bfd_alloc (htab->elf.dynobj, amt));
2409 if (p == NULL)
2410 goto error_return;
2411 p->next = *head;
2412 *head = p;
2413 p->sec = sec;
2414 p->count = 0;
2415 p->pc_count = 0;
2416 }
2417
2418 p->count += 1;
2419 /* Count size relocation as PC-relative relocation. */
2420 if (X86_PCREL_TYPE_P (true, r_type) || size_reloc)
2421 p->pc_count += 1;
2422 }
2423 break;
2424
2425 /* This relocation describes the C++ object vtable hierarchy.
2426 Reconstruct it for later use during GC. */
2427 case R_X86_64_GNU_VTINHERIT:
2428 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2429 goto error_return;
2430 break;
2431
2432 /* This relocation describes which C++ vtable entries are actually
2433 used. Record for later use during GC. */
2434 case R_X86_64_GNU_VTENTRY:
2435 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2436 goto error_return;
2437 break;
2438
2439 default:
2440 break;
2441 }
2442 }
2443
2444 if (elf_section_data (sec)->this_hdr.contents != contents)
2445 {
2446 if (!converted && !_bfd_link_keep_memory (info))
2447 free (contents);
2448 else
2449 {
2450 /* Cache the section contents for elf_link_input_bfd if any
2451 load is converted or --no-keep-memory isn't used. */
2452 elf_section_data (sec)->this_hdr.contents = contents;
2453 info->cache_size += sec->size;
2454 }
2455 }
2456
2457 /* Cache relocations if any load is converted. */
2458 if (elf_section_data (sec)->relocs != relocs && converted)
2459 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2460
2461 return true;
2462
2463 error_return:
2464 if (elf_section_data (sec)->this_hdr.contents != contents)
2465 free (contents);
2466 sec->check_relocs_failed = 1;
2467 return false;
2468 }
2469
2470 static bool
2471 elf_x86_64_always_size_sections (bfd *output_bfd,
2472 struct bfd_link_info *info)
2473 {
2474 bfd *abfd;
2475
2476 /* Scan relocations after rel_from_abs has been set on __ehdr_start. */
2477 for (abfd = info->input_bfds;
2478 abfd != (bfd *) NULL;
2479 abfd = abfd->link.next)
2480 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour
2481 && !_bfd_elf_link_iterate_on_relocs (abfd, info,
2482 elf_x86_64_scan_relocs))
2483 return false;
2484
2485 return _bfd_x86_elf_always_size_sections (output_bfd, info);
2486 }
2487
2488 /* Return the relocation value for @tpoff relocation
2489 if STT_TLS virtual address is ADDRESS. */
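/* A worked example (made-up numbers): if the TLS segment starts at vma
   0x601000 and the aligned static TLS size is 0x10, a TLS symbol whose
   link-time address is 0x601004 gets the @tpoff value
   0x601004 - 0x10 - 0x601000 = -0xc, i.e. a negative offset from the
   thread pointer, as required by the x86-64 TLS variant II layout.  */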
2490
2491 static bfd_vma
2492 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2493 {
2494 struct elf_link_hash_table *htab = elf_hash_table (info);
2495 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2496 bfd_vma static_tls_size;
2497
2498 /* If tls_segment is NULL, we should have signalled an error already. */
2499 if (htab->tls_sec == NULL)
2500 return 0;
2501
2502 /* Consider special static TLS alignment requirements. */
2503 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2504 return address - static_tls_size - htab->tls_sec->vma;
2505 }
2506
2507 /* Relocate an x86_64 ELF section. */
2508
2509 static int
2510 elf_x86_64_relocate_section (bfd *output_bfd,
2511 struct bfd_link_info *info,
2512 bfd *input_bfd,
2513 asection *input_section,
2514 bfd_byte *contents,
2515 Elf_Internal_Rela *relocs,
2516 Elf_Internal_Sym *local_syms,
2517 asection **local_sections)
2518 {
2519 struct elf_x86_link_hash_table *htab;
2520 Elf_Internal_Shdr *symtab_hdr;
2521 struct elf_link_hash_entry **sym_hashes;
2522 bfd_vma *local_got_offsets;
2523 bfd_vma *local_tlsdesc_gotents;
2524 Elf_Internal_Rela *rel;
2525 Elf_Internal_Rela *wrel;
2526 Elf_Internal_Rela *relend;
2527 unsigned int plt_entry_size;
2528 bool status;
2529
2530 /* Skip if check_relocs or scan_relocs failed. */
2531 if (input_section->check_relocs_failed)
2532 return false;
2533
2534 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2535 if (htab == NULL)
2536 return false;
2537
2538 if (!is_x86_elf (input_bfd, htab))
2539 {
2540 bfd_set_error (bfd_error_wrong_format);
2541 return false;
2542 }
2543
2544 plt_entry_size = htab->plt.plt_entry_size;
2545 symtab_hdr = &elf_symtab_hdr (input_bfd);
2546 sym_hashes = elf_sym_hashes (input_bfd);
2547 local_got_offsets = elf_local_got_offsets (input_bfd);
2548 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2549
2550 _bfd_x86_elf_set_tls_module_base (info);
2551
2552 status = true;
2553 rel = wrel = relocs;
2554 relend = relocs + input_section->reloc_count;
2555 for (; rel < relend; wrel++, rel++)
2556 {
2557 unsigned int r_type, r_type_tls;
2558 reloc_howto_type *howto;
2559 unsigned long r_symndx;
2560 struct elf_link_hash_entry *h;
2561 struct elf_x86_link_hash_entry *eh;
2562 Elf_Internal_Sym *sym;
2563 asection *sec;
2564 bfd_vma off, offplt, plt_offset;
2565 bfd_vma relocation;
2566 bool unresolved_reloc;
2567 bfd_reloc_status_type r;
2568 int tls_type;
2569 asection *base_got, *resolved_plt;
2570 bfd_vma st_size;
2571 bool resolved_to_zero;
2572 bool relative_reloc;
2573 bool converted_reloc;
2574 bool need_copy_reloc_in_pie;
2575 bool no_copyreloc_p;
2576
2577 r_type = ELF32_R_TYPE (rel->r_info);
2578 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2579 || r_type == (int) R_X86_64_GNU_VTENTRY)
2580 {
2581 if (wrel != rel)
2582 *wrel = *rel;
2583 continue;
2584 }
2585
2586 r_symndx = htab->r_sym (rel->r_info);
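      /* R_X86_64_converted_reloc_bit is set by
	 elf_x86_64_convert_load_reloc on relocations whose GOTPCREL load
	 was already rewritten during relocation scanning; strip it here
	 and handle the plain converted relocation type.  */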
2587 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2588 if (converted_reloc)
2589 {
2590 r_type &= ~R_X86_64_converted_reloc_bit;
2591 rel->r_info = htab->r_info (r_symndx, r_type);
2592 }
2593
2594 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2595 if (howto == NULL)
2596 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2597
2598 h = NULL;
2599 sym = NULL;
2600 sec = NULL;
2601 unresolved_reloc = false;
2602 if (r_symndx < symtab_hdr->sh_info)
2603 {
2604 sym = local_syms + r_symndx;
2605 sec = local_sections[r_symndx];
2606
2607 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2608 &sec, rel);
2609 st_size = sym->st_size;
2610
2611 /* Relocate against local STT_GNU_IFUNC symbol. */
2612 if (!bfd_link_relocatable (info)
2613 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2614 {
2615 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2616 rel, false);
2617 if (h == NULL)
2618 abort ();
2619
2620 /* Set STT_GNU_IFUNC symbol value. */
2621 h->root.u.def.value = sym->st_value;
2622 h->root.u.def.section = sec;
2623 }
2624 }
2625 else
2626 {
2627 bool warned ATTRIBUTE_UNUSED;
2628 bool ignored ATTRIBUTE_UNUSED;
2629
2630 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2631 r_symndx, symtab_hdr, sym_hashes,
2632 h, sec, relocation,
2633 unresolved_reloc, warned, ignored);
2634 st_size = h->size;
2635 }
2636
2637 if (sec != NULL && discarded_section (sec))
2638 {
2639 _bfd_clear_contents (howto, input_bfd, input_section,
2640 contents, rel->r_offset);
2641 wrel->r_offset = rel->r_offset;
2642 wrel->r_info = 0;
2643 wrel->r_addend = 0;
2644
2645 /* For ld -r, remove relocations in debug sections against
2646 sections defined in discarded sections. This is not done for
2647 .eh_frame, as the eh_frame editing code expects relocations to be present. */
2648 if (bfd_link_relocatable (info)
2649 && (input_section->flags & SEC_DEBUGGING))
2650 wrel--;
2651
2652 continue;
2653 }
2654
2655 if (bfd_link_relocatable (info))
2656 {
2657 if (wrel != rel)
2658 *wrel = *rel;
2659 continue;
2660 }
2661
2662 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2663 {
2664 if (r_type == R_X86_64_64)
2665 {
2666 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2667 zero-extend it to 64bit if addend is zero. */
2668 r_type = R_X86_64_32;
2669 memset (contents + rel->r_offset + 4, 0, 4);
2670 }
2671 else if (r_type == R_X86_64_SIZE64)
2672 {
2673 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2674 zero-extend it to 64bit if addend is zero. */
2675 r_type = R_X86_64_SIZE32;
2676 memset (contents + rel->r_offset + 4, 0, 4);
2677 }
2678 }
2679
2680 eh = (struct elf_x86_link_hash_entry *) h;
2681
2682 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2683 it here if it is defined in a non-shared object. */
2684 if (h != NULL
2685 && h->type == STT_GNU_IFUNC
2686 && h->def_regular)
2687 {
2688 bfd_vma plt_index;
2689 const char *name;
2690
2691 if ((input_section->flags & SEC_ALLOC) == 0)
2692 {
2693 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2694 STT_GNU_IFUNC symbol as STT_FUNC. */
2695 if (elf_section_type (input_section) == SHT_NOTE)
2696 goto skip_ifunc;
2697 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2698 sections because such sections are not SEC_ALLOC and
2699 thus ld.so will not process them. */
2700 if ((input_section->flags & SEC_DEBUGGING) != 0)
2701 continue;
2702 abort ();
2703 }
2704
2705 switch (r_type)
2706 {
2707 default:
2708 break;
2709
2710 case R_X86_64_GOTPCREL:
2711 case R_X86_64_GOTPCRELX:
2712 case R_X86_64_REX_GOTPCRELX:
2713 case R_X86_64_GOTPCREL64:
2714 base_got = htab->elf.sgot;
2715 off = h->got.offset;
2716
2717 if (base_got == NULL)
2718 abort ();
2719
2720 if (off == (bfd_vma) -1)
2721 {
2722 /* We can't use h->got.offset here to save state, or
2723 even just remember the offset, as finish_dynamic_symbol
2724 would use that as offset into .got. */
2725
2726 if (h->plt.offset == (bfd_vma) -1)
2727 abort ();
2728
2729 if (htab->elf.splt != NULL)
2730 {
2731 plt_index = (h->plt.offset / plt_entry_size
2732 - htab->plt.has_plt0);
2733 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2734 base_got = htab->elf.sgotplt;
2735 }
2736 else
2737 {
2738 plt_index = h->plt.offset / plt_entry_size;
2739 off = plt_index * GOT_ENTRY_SIZE;
2740 base_got = htab->elf.igotplt;
2741 }
2742
2743 if (h->dynindx == -1
2744 || h->forced_local
2745 || info->symbolic)
2746 {
2747 /* This references the local definition. We must
2748 initialize this entry in the global offset table.
2749 Since the offset must always be a multiple of 8,
2750 we use the least significant bit to record
2751 whether we have initialized it already.
2752
2753 When doing a dynamic link, we create a .rela.got
2754 relocation entry to initialize the value. This
2755 is done in the finish_dynamic_symbol routine. */
2756 if ((off & 1) != 0)
2757 off &= ~1;
2758 else
2759 {
2760 bfd_put_64 (output_bfd, relocation,
2761 base_got->contents + off);
2762 /* Note that this is harmless for the GOTPLT64
2763 case, as -1 | 1 still is -1. */
2764 h->got.offset |= 1;
2765 }
2766 }
2767 }
2768
2769 relocation = (base_got->output_section->vma
2770 + base_got->output_offset + off);
2771
2772 goto do_relocation;
2773 }
2774
2775 if (h->plt.offset == (bfd_vma) -1)
2776 {
2777 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2778 if (r_type == htab->pointer_r_type
2779 && (input_section->flags & SEC_CODE) == 0)
2780 goto do_ifunc_pointer;
2781 goto bad_ifunc_reloc;
2782 }
2783
2784 /* STT_GNU_IFUNC symbol must go through PLT. */
2785 if (htab->elf.splt != NULL)
2786 {
2787 if (htab->plt_second != NULL)
2788 {
2789 resolved_plt = htab->plt_second;
2790 plt_offset = eh->plt_second.offset;
2791 }
2792 else
2793 {
2794 resolved_plt = htab->elf.splt;
2795 plt_offset = h->plt.offset;
2796 }
2797 }
2798 else
2799 {
2800 resolved_plt = htab->elf.iplt;
2801 plt_offset = h->plt.offset;
2802 }
2803
2804 relocation = (resolved_plt->output_section->vma
2805 + resolved_plt->output_offset + plt_offset);
2806
2807 switch (r_type)
2808 {
2809 default:
2810 bad_ifunc_reloc:
2811 if (h->root.root.string)
2812 name = h->root.root.string;
2813 else
2814 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2815 NULL);
2816 _bfd_error_handler
2817 /* xgettext:c-format */
2818 (_("%pB: relocation %s against STT_GNU_IFUNC "
2819 "symbol `%s' isn't supported"), input_bfd,
2820 howto->name, name);
2821 bfd_set_error (bfd_error_bad_value);
2822 return false;
2823
2824 case R_X86_64_32S:
2825 if (bfd_link_pic (info))
2826 abort ();
2827 goto do_relocation;
2828
2829 case R_X86_64_32:
2830 if (ABI_64_P (output_bfd))
2831 goto do_relocation;
2832 /* FALLTHROUGH */
2833 case R_X86_64_64:
2834 do_ifunc_pointer:
2835 if (rel->r_addend != 0)
2836 {
2837 if (h->root.root.string)
2838 name = h->root.root.string;
2839 else
2840 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2841 sym, NULL);
2842 _bfd_error_handler
2843 /* xgettext:c-format */
2844 (_("%pB: relocation %s against STT_GNU_IFUNC "
2845 "symbol `%s' has non-zero addend: %" PRId64),
2846 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2847 bfd_set_error (bfd_error_bad_value);
2848 return false;
2849 }
2850
2851 /* Generate a dynamic relocation only when there is a
2852 non-GOT reference in a shared object or there is no
2853 PLT. */
2854 if ((bfd_link_pic (info) && h->non_got_ref)
2855 || h->plt.offset == (bfd_vma) -1)
2856 {
2857 Elf_Internal_Rela outrel;
2858 asection *sreloc;
2859
2860 /* Need a dynamic relocation to get the real function
2861 address. */
2862 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2863 info,
2864 input_section,
2865 rel->r_offset);
2866 if (outrel.r_offset == (bfd_vma) -1
2867 || outrel.r_offset == (bfd_vma) -2)
2868 abort ();
2869
2870 outrel.r_offset += (input_section->output_section->vma
2871 + input_section->output_offset);
2872
2873 if (POINTER_LOCAL_IFUNC_P (info, h))
2874 {
2875 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2876 h->root.root.string,
2877 h->root.u.def.section->owner);
2878
2879 /* This symbol is resolved locally. */
2880 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2881 outrel.r_addend = (h->root.u.def.value
2882 + h->root.u.def.section->output_section->vma
2883 + h->root.u.def.section->output_offset);
2884
2885 if (htab->params->report_relative_reloc)
2886 _bfd_x86_elf_link_report_relative_reloc
2887 (info, input_section, h, sym,
2888 "R_X86_64_IRELATIVE", &outrel);
2889 }
2890 else
2891 {
2892 outrel.r_info = htab->r_info (h->dynindx, r_type);
2893 outrel.r_addend = 0;
2894 }
2895
2896 /* Dynamic relocations are stored in
2897 1. .rela.ifunc section in PIC object.
2898 2. .rela.got section in dynamic executable.
2899 3. .rela.iplt section in static executable. */
2900 if (bfd_link_pic (info))
2901 sreloc = htab->elf.irelifunc;
2902 else if (htab->elf.splt != NULL)
2903 sreloc = htab->elf.srelgot;
2904 else
2905 sreloc = htab->elf.irelplt;
2906 elf_append_rela (output_bfd, sreloc, &outrel);
2907
2908 /* If this reloc is against an external symbol, we
2909 do not want to fiddle with the addend. Otherwise,
2910 we need to include the symbol value so that it
2911 becomes an addend for the dynamic reloc. For an
2912 internal symbol, we have updated addend. */
2913 continue;
2914 }
2915 /* FALLTHROUGH */
2916 case R_X86_64_PC32:
2917 case R_X86_64_PC64:
2918 case R_X86_64_PLT32:
2919 goto do_relocation;
2920 }
2921 }
2922
2923 skip_ifunc:
2924 resolved_to_zero = (eh != NULL
2925 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2926
2927 /* When generating a shared object, the relocations handled here are
2928 copied into the output file to be resolved at run time. */
2929 switch (r_type)
2930 {
2931 case R_X86_64_GOT32:
2932 case R_X86_64_GOT64:
2933 /* Relocation is to the entry for this symbol in the global
2934 offset table. */
2935 case R_X86_64_GOTPCREL:
2936 case R_X86_64_GOTPCRELX:
2937 case R_X86_64_REX_GOTPCRELX:
2938 case R_X86_64_GOTPCREL64:
2939 /* Use global offset table entry as symbol value. */
2940 case R_X86_64_GOTPLT64:
2941 /* This is obsolete and treated the same as GOT64. */
2942 base_got = htab->elf.sgot;
2943
2944 if (htab->elf.sgot == NULL)
2945 abort ();
2946
2947 relative_reloc = false;
2948 if (h != NULL)
2949 {
2950 off = h->got.offset;
2951 if (h->needs_plt
2952 && h->plt.offset != (bfd_vma)-1
2953 && off == (bfd_vma)-1)
2954 {
2955 /* We can't use h->got.offset here to save
2956 state, or even just remember the offset, as
2957 finish_dynamic_symbol would use that as offset into
2958 .got. */
2959 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2960 - htab->plt.has_plt0);
2961 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2962 base_got = htab->elf.sgotplt;
2963 }
2964
2965 if (RESOLVED_LOCALLY_P (info, h, htab))
2966 {
2967 /* We must initialize this entry in the global offset
2968 table. Since the offset must always be a multiple
2969 of 8, we use the least significant bit to record
2970 whether we have initialized it already.
2971
2972 When doing a dynamic link, we create a .rela.got
2973 relocation entry to initialize the value. This is
2974 done in the finish_dynamic_symbol routine. */
2975 if ((off & 1) != 0)
2976 off &= ~1;
2977 else
2978 {
2979 bfd_put_64 (output_bfd, relocation,
2980 base_got->contents + off);
2981 /* Note that this is harmless for the GOTPLT64 case,
2982 as -1 | 1 still is -1. */
2983 h->got.offset |= 1;
2984
2985 /* NB: Don't generate relative relocation here if
2986 it has been generated by DT_RELR. */
2987 if (!info->enable_dt_relr
2988 && GENERATE_RELATIVE_RELOC_P (info, h))
2989 {
2990 /* If this symbol isn't dynamic in PIC,
2991 generate R_X86_64_RELATIVE here. */
2992 eh->no_finish_dynamic_symbol = 1;
2993 relative_reloc = true;
2994 }
2995 }
2996 }
2997 else
2998 unresolved_reloc = false;
2999 }
3000 else
3001 {
3002 if (local_got_offsets == NULL)
3003 abort ();
3004
3005 off = local_got_offsets[r_symndx];
3006
3007 /* The offset must always be a multiple of 8. We use
3008 the least significant bit to record whether we have
3009 already generated the necessary reloc. */
3010 if ((off & 1) != 0)
3011 off &= ~1;
3012 else
3013 {
3014 bfd_put_64 (output_bfd, relocation,
3015 base_got->contents + off);
3016 local_got_offsets[r_symndx] |= 1;
3017
3018 /* NB: GOTPCREL relocations against local absolute
3019 symbol store relocation value in the GOT slot
3020 without relative relocation. Don't generate
3021 relative relocation here if it has been generated
3022 by DT_RELR. */
3023 if (!info->enable_dt_relr
3024 && bfd_link_pic (info)
3025 && !(sym->st_shndx == SHN_ABS
3026 && (r_type == R_X86_64_GOTPCREL
3027 || r_type == R_X86_64_GOTPCRELX
3028 || r_type == R_X86_64_REX_GOTPCRELX)))
3029 relative_reloc = true;
3030 }
3031 }
3032
3033 if (relative_reloc)
3034 {
3035 asection *s;
3036 Elf_Internal_Rela outrel;
3037
3038 /* We need to generate a R_X86_64_RELATIVE reloc
3039 for the dynamic linker. */
3040 s = htab->elf.srelgot;
3041 if (s == NULL)
3042 abort ();
3043
3044 outrel.r_offset = (base_got->output_section->vma
3045 + base_got->output_offset
3046 + off);
3047 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3048 outrel.r_addend = relocation;
3049
3050 if (htab->params->report_relative_reloc)
3051 _bfd_x86_elf_link_report_relative_reloc
3052 (info, input_section, h, sym, "R_X86_64_RELATIVE",
3053 &outrel);
3054
3055 elf_append_rela (output_bfd, s, &outrel);
3056 }
3057
3058 if (off >= (bfd_vma) -2)
3059 abort ();
3060
3061 relocation = base_got->output_section->vma
3062 + base_got->output_offset + off;
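	  /* R_X86_64_GOT32, R_X86_64_GOT64 and R_X86_64_GOTPLT64 want the
	     GOT entry's offset from _GLOBAL_OFFSET_TABLE_ (which points
	     into .got.plt), whereas the GOTPCREL variants use the entry's
	     absolute address and are made PC-relative by the howto.  */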
3063 if (r_type != R_X86_64_GOTPCREL
3064 && r_type != R_X86_64_GOTPCRELX
3065 && r_type != R_X86_64_REX_GOTPCRELX
3066 && r_type != R_X86_64_GOTPCREL64)
3067 relocation -= htab->elf.sgotplt->output_section->vma
3068 - htab->elf.sgotplt->output_offset;
3069
3070 break;
3071
3072 case R_X86_64_GOTOFF64:
3073 /* Relocation is relative to the start of the global offset
3074 table. */
3075
3076 /* Check to make sure it isn't a protected function or data
3077 symbol for a shared library, since it may not be local when
3078 used as a function address or with a copy relocation. We also
3079 need to make sure that the symbol is referenced locally. */
3080 if (bfd_link_pic (info) && h)
3081 {
3082 if (!h->def_regular)
3083 {
3084 const char *v;
3085
3086 switch (ELF_ST_VISIBILITY (h->other))
3087 {
3088 case STV_HIDDEN:
3089 v = _("hidden symbol");
3090 break;
3091 case STV_INTERNAL:
3092 v = _("internal symbol");
3093 break;
3094 case STV_PROTECTED:
3095 v = _("protected symbol");
3096 break;
3097 default:
3098 v = _("symbol");
3099 break;
3100 }
3101
3102 _bfd_error_handler
3103 /* xgettext:c-format */
3104 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3105 " `%s' can not be used when making a shared object"),
3106 input_bfd, v, h->root.root.string);
3107 bfd_set_error (bfd_error_bad_value);
3108 return false;
3109 }
3110 else if (!bfd_link_executable (info)
3111 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3112 && (h->type == STT_FUNC
3113 || h->type == STT_OBJECT)
3114 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3115 {
3116 _bfd_error_handler
3117 /* xgettext:c-format */
3118 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3119 " `%s' can not be used when making a shared object"),
3120 input_bfd,
3121 h->type == STT_FUNC ? "function" : "data",
3122 h->root.root.string);
3123 bfd_set_error (bfd_error_bad_value);
3124 return false;
3125 }
3126 }
3127
3128 /* Note that sgot is not involved in this
3129 calculation. We always want the start of .got.plt. If we
3130 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3131 permitted by the ABI, we might have to change this
3132 calculation. */
3133 relocation -= htab->elf.sgotplt->output_section->vma
3134 + htab->elf.sgotplt->output_offset;
3135 break;
3136
3137 case R_X86_64_GOTPC32:
3138 case R_X86_64_GOTPC64:
3139 /* Use global offset table as symbol value. */
3140 relocation = htab->elf.sgotplt->output_section->vma
3141 + htab->elf.sgotplt->output_offset;
3142 unresolved_reloc = false;
3143 break;
3144
3145 case R_X86_64_PLTOFF64:
3146 /* Relocation is PLT entry relative to GOT. For local
3147 symbols it's the symbol itself relative to GOT. */
3148 if (h != NULL
3149 /* See PLT32 handling. */
3150 && (h->plt.offset != (bfd_vma) -1
3151 || eh->plt_got.offset != (bfd_vma) -1)
3152 && htab->elf.splt != NULL)
3153 {
3154 if (eh->plt_got.offset != (bfd_vma) -1)
3155 {
3156 /* Use the GOT PLT. */
3157 resolved_plt = htab->plt_got;
3158 plt_offset = eh->plt_got.offset;
3159 }
3160 else if (htab->plt_second != NULL)
3161 {
3162 resolved_plt = htab->plt_second;
3163 plt_offset = eh->plt_second.offset;
3164 }
3165 else
3166 {
3167 resolved_plt = htab->elf.splt;
3168 plt_offset = h->plt.offset;
3169 }
3170
3171 relocation = (resolved_plt->output_section->vma
3172 + resolved_plt->output_offset
3173 + plt_offset);
3174 unresolved_reloc = false;
3175 }
3176
3177 relocation -= htab->elf.sgotplt->output_section->vma
3178 + htab->elf.sgotplt->output_offset;
3179 break;
3180
3181 case R_X86_64_PLT32:
3182 /* Relocation is to the entry for this symbol in the
3183 procedure linkage table. */
3184
3185 /* Resolve a PLT32 reloc against a local symbol directly,
3186 without using the procedure linkage table. */
3187 if (h == NULL)
3188 break;
3189
3190 if ((h->plt.offset == (bfd_vma) -1
3191 && eh->plt_got.offset == (bfd_vma) -1)
3192 || htab->elf.splt == NULL)
3193 {
3194 /* We didn't make a PLT entry for this symbol. This
3195 happens when statically linking PIC code, or when
3196 using -Bsymbolic. */
3197 break;
3198 }
3199
3200 use_plt:
3201 if (h->plt.offset != (bfd_vma) -1)
3202 {
3203 if (htab->plt_second != NULL)
3204 {
3205 resolved_plt = htab->plt_second;
3206 plt_offset = eh->plt_second.offset;
3207 }
3208 else
3209 {
3210 resolved_plt = htab->elf.splt;
3211 plt_offset = h->plt.offset;
3212 }
3213 }
3214 else
3215 {
3216 /* Use the GOT PLT. */
3217 resolved_plt = htab->plt_got;
3218 plt_offset = eh->plt_got.offset;
3219 }
3220
3221 relocation = (resolved_plt->output_section->vma
3222 + resolved_plt->output_offset
3223 + plt_offset);
3224 unresolved_reloc = false;
3225 break;
3226
3227 case R_X86_64_SIZE32:
3228 case R_X86_64_SIZE64:
3229 /* Set to symbol size. */
3230 relocation = st_size;
3231 goto direct;
3232
3233 case R_X86_64_PC8:
3234 case R_X86_64_PC16:
3235 case R_X86_64_PC32:
3236 /* Don't complain about -fPIC if the symbol is undefined when
3237 building an executable, unless it is an unresolved weak symbol,
3238 references a dynamic definition in a PIE, or -z nocopyreloc
3239 is used. */
3240 no_copyreloc_p
3241 = (info->nocopyreloc
3242 || (h != NULL
3243 && !h->root.linker_def
3244 && !h->root.ldscript_def
3245 && eh->def_protected));
3246
3247 if ((input_section->flags & SEC_ALLOC) != 0
3248 && (input_section->flags & SEC_READONLY) != 0
3249 && h != NULL
3250 && ((bfd_link_executable (info)
3251 && ((h->root.type == bfd_link_hash_undefweak
3252 && (eh == NULL
3253 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3254 eh)))
3255 || (bfd_link_pie (info)
3256 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3257 && h->def_dynamic)
3258 || (no_copyreloc_p
3259 && h->def_dynamic
3260 && !(h->root.u.def.section->flags & SEC_CODE))))
3261 || (bfd_link_pie (info)
3262 && h->root.type == bfd_link_hash_undefweak)
3263 || bfd_link_dll (info)))
3264 {
3265 bool fail = false;
3266 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3267 {
3268 /* Symbol is referenced locally. Make sure it is
3269 defined locally. */
3270 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3271 }
3272 else if (bfd_link_pie (info))
3273 {
3274 /* We can only use PC-relative relocations in PIE
3275 from non-code sections. */
3276 if (h->root.type == bfd_link_hash_undefweak
3277 || (h->type == STT_FUNC
3278 && (sec->flags & SEC_CODE) != 0))
3279 fail = true;
3280 }
3281 else if (no_copyreloc_p || bfd_link_dll (info))
3282 {
3283 /* Symbol doesn't need copy reloc and isn't
3284 referenced locally. Don't allow PC-relative
3285 relocations against default and protected
3286 symbols since address of protected function
3287 and location of protected data may not be in
3288 the shared object. */
3289 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3290 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3291 }
3292
3293 if (fail)
3294 return elf_x86_64_need_pic (info, input_bfd, input_section,
3295 h, NULL, NULL, howto);
3296 }
3297 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3298 as function address. */
3299 else if (h != NULL
3300 && (input_section->flags & SEC_CODE) == 0
3301 && bfd_link_pie (info)
3302 && h->type == STT_FUNC
3303 && !h->def_regular
3304 && h->def_dynamic)
3305 goto use_plt;
3306 /* Fall through. */
3307
3308 case R_X86_64_8:
3309 case R_X86_64_16:
3310 case R_X86_64_32:
3311 case R_X86_64_PC64:
3312 case R_X86_64_64:
3313 /* FIXME: The ABI says the linker should make sure the value is
3314 the same when it's zero-extended to 64 bits. */
3315
3316 direct:
3317 if ((input_section->flags & SEC_ALLOC) == 0)
3318 break;
3319
3320 need_copy_reloc_in_pie = (bfd_link_pie (info)
3321 && h != NULL
3322 && (h->needs_copy
3323 || eh->needs_copy
3324 || (h->root.type
3325 == bfd_link_hash_undefined))
3326 && (X86_PCREL_TYPE_P (true, r_type)
3327 || X86_SIZE_TYPE_P (true,
3328 r_type)));
3329
3330 if (GENERATE_DYNAMIC_RELOCATION_P (true, info, eh, r_type, sec,
3331 need_copy_reloc_in_pie,
3332 resolved_to_zero, false))
3333 {
3334 Elf_Internal_Rela outrel;
3335 bool skip, relocate;
3336 bool generate_dynamic_reloc = true;
3337 asection *sreloc;
3338 const char *relative_reloc_name = NULL;
3339
3340 /* When generating a shared object, these relocations
3341 are copied into the output file to be resolved at run
3342 time. */
3343 skip = false;
3344 relocate = false;
3345
3346 outrel.r_offset =
3347 _bfd_elf_section_offset (output_bfd, info, input_section,
3348 rel->r_offset);
3349 if (outrel.r_offset == (bfd_vma) -1)
3350 skip = true;
3351 else if (outrel.r_offset == (bfd_vma) -2)
3352 skip = true, relocate = true;
3353
3354 outrel.r_offset += (input_section->output_section->vma
3355 + input_section->output_offset);
3356
3357 if (skip)
3358 memset (&outrel, 0, sizeof outrel);
3359
3360 else if (COPY_INPUT_RELOC_P (true, info, h, r_type))
3361 {
3362 outrel.r_info = htab->r_info (h->dynindx, r_type);
3363 outrel.r_addend = rel->r_addend;
3364 }
3365 else
3366 {
3367 /* This symbol is local, or marked to become local.
3368 When relocation overflow check is disabled, we
3369 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3370 if (r_type == htab->pointer_r_type
3371 || (r_type == R_X86_64_32
3372 && htab->params->no_reloc_overflow_check))
3373 {
3374 relocate = true;
3375 /* NB: Don't generate relative relocation here if
3376 it has been generated by DT_RELR. */
3377 if (info->enable_dt_relr)
3378 generate_dynamic_reloc = false;
3379 else
3380 {
3381 outrel.r_info =
3382 htab->r_info (0, R_X86_64_RELATIVE);
3383 outrel.r_addend = relocation + rel->r_addend;
3384 relative_reloc_name = "R_X86_64_RELATIVE";
3385 }
3386 }
3387 else if (r_type == R_X86_64_64
3388 && !ABI_64_P (output_bfd))
3389 {
3390 relocate = true;
3391 outrel.r_info = htab->r_info (0,
3392 R_X86_64_RELATIVE64);
3393 outrel.r_addend = relocation + rel->r_addend;
3394 relative_reloc_name = "R_X86_64_RELATIVE64";
3395 /* Check addend overflow. */
3396 if ((outrel.r_addend & 0x80000000)
3397 != (rel->r_addend & 0x80000000))
3398 {
3399 const char *name;
3400 int addend = rel->r_addend;
3401 if (h && h->root.root.string)
3402 name = h->root.root.string;
3403 else
3404 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3405 sym, NULL);
3406 _bfd_error_handler
3407 /* xgettext:c-format */
3408 (_("%pB: addend %s%#x in relocation %s against "
3409 "symbol `%s' at %#" PRIx64
3410 " in section `%pA' is out of range"),
3411 input_bfd, addend < 0 ? "-" : "", addend,
3412 howto->name, name, (uint64_t) rel->r_offset,
3413 input_section);
3414 bfd_set_error (bfd_error_bad_value);
3415 return false;
3416 }
3417 }
3418 else
3419 {
3420 long sindx;
3421
3422 if (bfd_is_abs_section (sec))
3423 sindx = 0;
3424 else if (sec == NULL || sec->owner == NULL)
3425 {
3426 bfd_set_error (bfd_error_bad_value);
3427 return false;
3428 }
3429 else
3430 {
3431 asection *osec;
3432
3433 /* We are turning this relocation into one
3434 against a section symbol. It would be
3435 proper to subtract the symbol's value,
3436 osec->vma, from the emitted reloc addend,
3437 but ld.so expects buggy relocs. */
3438 osec = sec->output_section;
3439 sindx = elf_section_data (osec)->dynindx;
3440 if (sindx == 0)
3441 {
3442 asection *oi = htab->elf.text_index_section;
3443 sindx = elf_section_data (oi)->dynindx;
3444 }
3445 BFD_ASSERT (sindx != 0);
3446 }
3447
3448 outrel.r_info = htab->r_info (sindx, r_type);
3449 outrel.r_addend = relocation + rel->r_addend;
3450 }
3451 }
3452
3453 if (generate_dynamic_reloc)
3454 {
3455 sreloc = elf_section_data (input_section)->sreloc;
3456
3457 if (sreloc == NULL || sreloc->contents == NULL)
3458 {
3459 r = bfd_reloc_notsupported;
3460 goto check_relocation_error;
3461 }
3462
3463 if (relative_reloc_name
3464 && htab->params->report_relative_reloc)
3465 _bfd_x86_elf_link_report_relative_reloc
3466 (info, input_section, h, sym,
3467 relative_reloc_name, &outrel);
3468
3469 elf_append_rela (output_bfd, sreloc, &outrel);
3470 }
3471
3472 /* If this reloc is against an external symbol, we do
3473 not want to fiddle with the addend. Otherwise, we
3474 need to include the symbol value so that it becomes
3475 an addend for the dynamic reloc. */
3476 if (! relocate)
3477 continue;
3478 }
3479
3480 break;
3481
3482 case R_X86_64_TLSGD:
3483 case R_X86_64_GOTPC32_TLSDESC:
3484 case R_X86_64_TLSDESC_CALL:
3485 case R_X86_64_GOTTPOFF:
3486 tls_type = GOT_UNKNOWN;
3487 if (h == NULL && local_got_offsets)
3488 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3489 else if (h != NULL)
3490 tls_type = elf_x86_hash_entry (h)->tls_type;
3491
3492 r_type_tls = r_type;
3493 if (! elf_x86_64_tls_transition (info, input_bfd,
3494 input_section, contents,
3495 symtab_hdr, sym_hashes,
3496 &r_type_tls, tls_type, rel,
3497 relend, h, r_symndx, true))
3498 return false;
3499
3500 if (r_type_tls == R_X86_64_TPOFF32)
3501 {
3502 bfd_vma roff = rel->r_offset;
3503
3504 BFD_ASSERT (! unresolved_reloc);
3505
3506 if (r_type == R_X86_64_TLSGD)
3507 {
3508 /* GD->LE transition. For 64bit, change
3509 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3510 .word 0x6666; rex64; call __tls_get_addr@PLT
3511 or
3512 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3513 .byte 0x66; rex64
3514 call *__tls_get_addr@GOTPCREL(%rip)
3515 which may be converted to
3516 addr32 call __tls_get_addr
3517 into:
3518 movq %fs:0, %rax
3519 leaq foo@tpoff(%rax), %rax
3520 For 32bit, change
3521 leaq foo@tlsgd(%rip), %rdi
3522 .word 0x6666; rex64; call __tls_get_addr@PLT
3523 or
3524 leaq foo@tlsgd(%rip), %rdi
3525 .byte 0x66; rex64
3526 call *__tls_get_addr@GOTPCREL(%rip)
3527 which may be converted to
3528 addr32 call __tls_get_addr
3529 into:
3530 movl %fs:0, %eax
3531 leaq foo@tpoff(%rax), %rax
3532 For largepic, change:
3533 leaq foo@tlsgd(%rip), %rdi
3534 movabsq $__tls_get_addr@pltoff, %rax
3535 addq %r15, %rax
3536 call *%rax
3537 into:
3538 movq %fs:0, %rax
3539 leaq foo@tpoff(%rax), %rax
3540 nopw 0x0(%rax,%rax,1) */
3541 int largepic = 0;
3542 if (ABI_64_P (output_bfd))
3543 {
3544 if (contents[roff + 5] == 0xb8)
3545 {
3546 if (roff < 3
3547 || (roff - 3 + 22) > input_section->size)
3548 {
3549 corrupt_input:
3550 info->callbacks->einfo
3551 (_("%F%P: corrupt input: %pB\n"),
3552 input_bfd);
3553 return false;
3554 }
3555 memcpy (contents + roff - 3,
3556 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3557 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3558 largepic = 1;
3559 }
3560 else
3561 {
3562 if (roff < 4
3563 || (roff - 4 + 16) > input_section->size)
3564 goto corrupt_input;
3565 memcpy (contents + roff - 4,
3566 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3567 16);
3568 }
3569 }
3570 else
3571 {
3572 if (roff < 3
3573 || (roff - 3 + 15) > input_section->size)
3574 goto corrupt_input;
3575 memcpy (contents + roff - 3,
3576 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3577 15);
3578 }
3579 bfd_put_32 (output_bfd,
3580 elf_x86_64_tpoff (info, relocation),
3581 contents + roff + 8 + largepic);
3582 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3583 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3584 rel++;
3585 wrel++;
3586 continue;
3587 }
3588 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3589 {
3590 /* GDesc -> LE transition.
3591 It's originally something like:
3592 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3593 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3594
3595 Change it to:
3596 movq $x@tpoff, %rax <--- LP64 mode.
3597 rex movl $x@tpoff, %eax <--- X32 mode.
3598 */
3599
3600 unsigned int val, type;
3601
3602 if (roff < 3)
3603 goto corrupt_input;
3604 type = bfd_get_8 (input_bfd, contents + roff - 3);
3605 val = bfd_get_8 (input_bfd, contents + roff - 1);
3606 bfd_put_8 (output_bfd,
3607 (type & 0x48) | ((type >> 2) & 1),
3608 contents + roff - 3);
3609 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3610 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3611 contents + roff - 1);
3612 bfd_put_32 (output_bfd,
3613 elf_x86_64_tpoff (info, relocation),
3614 contents + roff);
3615 continue;
3616 }
3617 else if (r_type == R_X86_64_TLSDESC_CALL)
3618 {
3619 /* GDesc -> LE transition.
3620 It's originally:
3621 call *(%rax) <--- LP64 mode.
3622 call *(%eax) <--- X32 mode.
3623 Turn it into:
3624 xchg %ax,%ax <-- LP64 mode.
3625 nopl (%rax) <-- X32 mode.
3626 */
3627 unsigned int prefix = 0;
3628 if (!ABI_64_P (input_bfd))
3629 {
3630 /* Check for call *x@tlsdesc(%eax). */
3631 if (contents[roff] == 0x67)
3632 prefix = 1;
3633 }
3634 if (prefix)
3635 {
3636 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3637 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3638 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3639 }
3640 else
3641 {
3642 bfd_put_8 (output_bfd, 0x66, contents + roff);
3643 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3644 }
3645 continue;
3646 }
3647 else if (r_type == R_X86_64_GOTTPOFF)
3648 {
3649 /* IE->LE transition:
3650 For 64bit, originally it can be one of:
3651 movq foo@gottpoff(%rip), %reg
3652 addq foo@gottpoff(%rip), %reg
3653 We change it into:
3654 movq $foo, %reg
3655 leaq foo(%reg), %reg
3656 addq $foo, %reg.
3657 For 32bit, originally it can be one of:
3658 movq foo@gottpoff(%rip), %reg
3659 addl foo@gottpoff(%rip), %reg
3660 We change it into:
3661 movq $foo, %reg
3662 leal foo(%reg), %reg
3663 addl $foo, %reg. */
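	      /* Byte layout: an optional REX prefix at roff - 3, the opcode
		 (8b for mov, 03 for add) at roff - 2, the ModRM byte at
		 roff - 1 with the destination register in its reg field,
		 and the 32-bit displacement, which becomes the @tpoff
		 immediate, at roff.  */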
3664
3665 unsigned int val, type, reg;
3666
3667 if (roff >= 3)
3668 val = bfd_get_8 (input_bfd, contents + roff - 3);
3669 else
3670 {
3671 if (roff < 2)
3672 goto corrupt_input;
3673 val = 0;
3674 }
3675 type = bfd_get_8 (input_bfd, contents + roff - 2);
3676 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3677 reg >>= 3;
3678 if (type == 0x8b)
3679 {
3680 /* movq */
3681 if (val == 0x4c)
3682 {
3683 if (roff < 3)
3684 goto corrupt_input;
3685 bfd_put_8 (output_bfd, 0x49,
3686 contents + roff - 3);
3687 }
3688 else if (!ABI_64_P (output_bfd) && val == 0x44)
3689 {
3690 if (roff < 3)
3691 goto corrupt_input;
3692 bfd_put_8 (output_bfd, 0x41,
3693 contents + roff - 3);
3694 }
3695 bfd_put_8 (output_bfd, 0xc7,
3696 contents + roff - 2);
3697 bfd_put_8 (output_bfd, 0xc0 | reg,
3698 contents + roff - 1);
3699 }
3700 else if (reg == 4)
3701 {
3702 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3703 is special */
3704 if (val == 0x4c)
3705 {
3706 if (roff < 3)
3707 goto corrupt_input;
3708 bfd_put_8 (output_bfd, 0x49,
3709 contents + roff - 3);
3710 }
3711 else if (!ABI_64_P (output_bfd) && val == 0x44)
3712 {
3713 if (roff < 3)
3714 goto corrupt_input;
3715 bfd_put_8 (output_bfd, 0x41,
3716 contents + roff - 3);
3717 }
3718 bfd_put_8 (output_bfd, 0x81,
3719 contents + roff - 2);
3720 bfd_put_8 (output_bfd, 0xc0 | reg,
3721 contents + roff - 1);
3722 }
3723 else
3724 {
3725 /* addq/addl -> leaq/leal */
3726 if (val == 0x4c)
3727 {
3728 if (roff < 3)
3729 goto corrupt_input;
3730 bfd_put_8 (output_bfd, 0x4d,
3731 contents + roff - 3);
3732 }
3733 else if (!ABI_64_P (output_bfd) && val == 0x44)
3734 {
3735 if (roff < 3)
3736 goto corrupt_input;
3737 bfd_put_8 (output_bfd, 0x45,
3738 contents + roff - 3);
3739 }
3740 bfd_put_8 (output_bfd, 0x8d,
3741 contents + roff - 2);
3742 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3743 contents + roff - 1);
3744 }
3745 bfd_put_32 (output_bfd,
3746 elf_x86_64_tpoff (info, relocation),
3747 contents + roff);
3748 continue;
3749 }
3750 else
3751 BFD_ASSERT (false);
3752 }
3753
3754 if (htab->elf.sgot == NULL)
3755 abort ();
3756
3757 if (h != NULL)
3758 {
3759 off = h->got.offset;
3760 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3761 }
3762 else
3763 {
3764 if (local_got_offsets == NULL)
3765 abort ();
3766
3767 off = local_got_offsets[r_symndx];
3768 offplt = local_tlsdesc_gotents[r_symndx];
3769 }
3770
3771 if ((off & 1) != 0)
3772 off &= ~1;
3773 else
3774 {
3775 Elf_Internal_Rela outrel;
3776 int dr_type, indx;
3777 asection *sreloc;
3778
3779 if (htab->elf.srelgot == NULL)
3780 abort ();
3781
3782 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3783
3784 if (GOT_TLS_GDESC_P (tls_type))
3785 {
3786 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3787 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3788 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3789 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3790 + htab->elf.sgotplt->output_offset
3791 + offplt
3792 + htab->sgotplt_jump_table_size);
3793 sreloc = htab->elf.srelplt;
3794 if (indx == 0)
3795 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3796 else
3797 outrel.r_addend = 0;
3798 elf_append_rela (output_bfd, sreloc, &outrel);
3799 }
3800
3801 sreloc = htab->elf.srelgot;
3802
3803 outrel.r_offset = (htab->elf.sgot->output_section->vma
3804 + htab->elf.sgot->output_offset + off);
3805
3806 if (GOT_TLS_GD_P (tls_type))
3807 dr_type = R_X86_64_DTPMOD64;
3808 else if (GOT_TLS_GDESC_P (tls_type))
3809 goto dr_done;
3810 else
3811 dr_type = R_X86_64_TPOFF64;
3812
3813 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3814 outrel.r_addend = 0;
3815 if ((dr_type == R_X86_64_TPOFF64
3816 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3817 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3818 outrel.r_info = htab->r_info (indx, dr_type);
3819
3820 elf_append_rela (output_bfd, sreloc, &outrel);
3821
3822 if (GOT_TLS_GD_P (tls_type))
3823 {
3824 if (indx == 0)
3825 {
3826 BFD_ASSERT (! unresolved_reloc);
3827 bfd_put_64 (output_bfd,
3828 relocation - _bfd_x86_elf_dtpoff_base (info),
3829 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3830 }
3831 else
3832 {
3833 bfd_put_64 (output_bfd, 0,
3834 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3835 outrel.r_info = htab->r_info (indx,
3836 R_X86_64_DTPOFF64);
3837 outrel.r_offset += GOT_ENTRY_SIZE;
3838 elf_append_rela (output_bfd, sreloc,
3839 &outrel);
3840 }
3841 }
3842
3843 dr_done:
3844 if (h != NULL)
3845 h->got.offset |= 1;
3846 else
3847 local_got_offsets[r_symndx] |= 1;
3848 }
3849
3850 if (off >= (bfd_vma) -2
3851 && ! GOT_TLS_GDESC_P (tls_type))
3852 abort ();
3853 if (r_type_tls == r_type)
3854 {
3855 if (r_type == R_X86_64_GOTPC32_TLSDESC
3856 || r_type == R_X86_64_TLSDESC_CALL)
3857 relocation = htab->elf.sgotplt->output_section->vma
3858 + htab->elf.sgotplt->output_offset
3859 + offplt + htab->sgotplt_jump_table_size;
3860 else
3861 relocation = htab->elf.sgot->output_section->vma
3862 + htab->elf.sgot->output_offset + off;
3863 unresolved_reloc = false;
3864 }
3865 else
3866 {
3867 bfd_vma roff = rel->r_offset;
3868
3869 if (r_type == R_X86_64_TLSGD)
3870 {
3871 /* GD->IE transition. For 64bit, change
3872 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3873 .word 0x6666; rex64; call __tls_get_addr@PLT
3874 or
3875 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3876 .byte 0x66; rex64
3877 call *__tls_get_addr@GOTPCREL(%rip)
3878 which may be converted to
3879 addr32 call __tls_get_addr
3880 into:
3881 movq %fs:0, %rax
3882 addq foo@gottpoff(%rip), %rax
3883 For 32bit, change
3884 leaq foo@tlsgd(%rip), %rdi
3885 .word 0x6666; rex64; call __tls_get_addr@PLT
3886 or
3887 leaq foo@tlsgd(%rip), %rdi
3888 .byte 0x66; rex64;
3889 call *__tls_get_addr@GOTPCREL(%rip)
3890 which may be converted to
3891 addr32 call __tls_get_addr
3892 into:
3893 movl %fs:0, %eax
3894 addq foo@gottpoff(%rip), %rax
3895 For largepic, change:
3896 leaq foo@tlsgd(%rip), %rdi
3897 movabsq $__tls_get_addr@pltoff, %rax
3898 addq %r15, %rax
3899 call *%rax
3900 into:
3901 movq %fs:0, %rax
3902 addq foo@gottpoff(%rip), %rax
3903 nopw 0x0(%rax,%rax,1) */
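	      /* Byte-level sketch of the plain LP64 rewrite below (the X32
		 and largepic variants differ mainly in prefixes, padding
		 and length): the 16 bytes at roff - 4 become
		   64 48 8b 04 25 00 00 00 00	movq %fs:0, %rax
		   48 03 05 xx xx xx xx		addq foo@gottpoff(%rip), %rax
		 and the addq displacement at roff + 8 is then filled with
		 the PC-relative distance to the GOT entry, which is what
		 the "- 12" in the computation below accounts for.  */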
3904 int largepic = 0;
3905 if (ABI_64_P (output_bfd))
3906 {
3907 if (contents[roff + 5] == 0xb8)
3908 {
3909 if (roff < 3
3910 || (roff - 3 + 22) > input_section->size)
3911 goto corrupt_input;
3912 memcpy (contents + roff - 3,
3913 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3914 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3915 largepic = 1;
3916 }
3917 else
3918 {
3919 if (roff < 4
3920 || (roff - 4 + 16) > input_section->size)
3921 goto corrupt_input;
3922 memcpy (contents + roff - 4,
3923 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3924 16);
3925 }
3926 }
3927 else
3928 {
3929 if (roff < 3
3930 || (roff - 3 + 15) > input_section->size)
3931 goto corrupt_input;
3932 memcpy (contents + roff - 3,
3933 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3934 15);
3935 }
3936
3937 relocation = (htab->elf.sgot->output_section->vma
3938 + htab->elf.sgot->output_offset + off
3939 - roff
3940 - largepic
3941 - input_section->output_section->vma
3942 - input_section->output_offset
3943 - 12);
3944 bfd_put_32 (output_bfd, relocation,
3945 contents + roff + 8 + largepic);
3946 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3947 rel++;
3948 wrel++;
3949 continue;
3950 }
3951 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3952 {
3953 /* GDesc -> IE transition.
3954 It's originally something like:
3955 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3956 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3957
3958 Change it to:
3959 # before xchg %ax,%ax in LP64 mode.
3960 movq x@gottpoff(%rip), %rax
3961 # before nopl (%rax) in X32 mode.
3962 rex movl x@gottpoff(%rip), %eax
3963 */
3964
3965 /* Now modify the instruction as appropriate. To
3966 turn a lea into a mov in the form we use it, it
3967 suffices to change the second byte from 0x8d to
3968 0x8b. */
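	      /* E.g. for the LP64 form:
		    48 8d 05 xx xx xx xx	leaq x@tlsdesc(%rip), %rax
		 -> 48 8b 05 tt tt tt tt	movq x@gottpoff(%rip), %rax  */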
3969 if (roff < 2)
3970 goto corrupt_input;
3971 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3972
3973 bfd_put_32 (output_bfd,
3974 htab->elf.sgot->output_section->vma
3975 + htab->elf.sgot->output_offset + off
3976 - rel->r_offset
3977 - input_section->output_section->vma
3978 - input_section->output_offset
3979 - 4,
3980 contents + roff);
3981 continue;
3982 }
3983 else if (r_type == R_X86_64_TLSDESC_CALL)
3984 {
3985 /* GDesc -> IE transition.
3986 It's originally:
3987 call *(%rax) <--- LP64 mode.
3988 call *(%eax) <--- X32 mode.
3989
3990 Change it to:
3991 xchg %ax, %ax <-- LP64 mode.
3992 nopl (%rax) <-- X32 mode.
3993 */
3994
3995 unsigned int prefix = 0;
3996 if (!ABI_64_P (input_bfd))
3997 {
3998 /* Check for call *x@tlsdesc(%eax). */
3999 if (contents[roff] == 0x67)
4000 prefix = 1;
4001 }
4002 if (prefix)
4003 {
4004 bfd_put_8 (output_bfd, 0x0f, contents + roff);
4005 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
4006 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
4007 }
4008 else
4009 {
4010 bfd_put_8 (output_bfd, 0x66, contents + roff);
4011 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4012 }
4013 continue;
4014 }
4015 else
4016 BFD_ASSERT (false);
4017 }
4018 break;
4019
4020 case R_X86_64_TLSLD:
4021 if (! elf_x86_64_tls_transition (info, input_bfd,
4022 input_section, contents,
4023 symtab_hdr, sym_hashes,
4024 &r_type, GOT_UNKNOWN, rel,
4025 relend, h, r_symndx, true))
4026 return false;
4027
4028 if (r_type != R_X86_64_TLSLD)
4029 {
4030 /* LD->LE transition:
4031 leaq foo@tlsld(%rip), %rdi
4032 call __tls_get_addr@PLT
4033 For 64bit, we change it into:
4034 .word 0x6666; .byte 0x66; movq %fs:0, %rax
4035 For 32bit, we change it into:
4036 nopl 0x0(%rax); movl %fs:0, %eax
4037 Or
4038 leaq foo@tlsld(%rip), %rdi;
4039 call *__tls_get_addr@GOTPCREL(%rip)
4040 which may be converted to
4041 addr32 call __tls_get_addr
4042 For 64bit, we change it into:
4043 .word 0x6666; .word 0x6666; movq %fs:0, %rax
4044 For 32bit, we change it into:
4045 nopw 0x0(%rax); movl %fs:0, %eax
4046 For largepic, change:
4047 leaq foo@tlsld(%rip), %rdi
4048 movabsq $__tls_get_addr@pltoff, %rax
4049 addq %rbx, %rax
4050 call *%rax
4051 into
4052 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
4053 movq %fs:0, %rax */
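	      /* For example, the plain LP64 rewrite below replaces the
		 12 bytes at rel->r_offset - 3 (the 7-byte leaq plus the
		 5-byte call) with one prefix-padded 12-byte load:
		   66 66 66 64 48 8b 04 25 00 00 00 00
		 i.e. data16 prefixes + movq %fs:0, %rax.  */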
4054
4055 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4056 if (ABI_64_P (output_bfd))
4057 {
4058 if ((rel->r_offset + 5) >= input_section->size)
4059 goto corrupt_input;
4060 if (contents[rel->r_offset + 5] == 0xb8)
4061 {
4062 if (rel->r_offset < 3
4063 || (rel->r_offset - 3 + 22) > input_section->size)
4064 goto corrupt_input;
4065 memcpy (contents + rel->r_offset - 3,
4066 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4067 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4068 }
4069 else if (contents[rel->r_offset + 4] == 0xff
4070 || contents[rel->r_offset + 4] == 0x67)
4071 {
4072 if (rel->r_offset < 3
4073 || (rel->r_offset - 3 + 13) > input_section->size)
4074 goto corrupt_input;
4075 memcpy (contents + rel->r_offset - 3,
4076 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
4077 13);
4078
4079 }
4080 else
4081 {
4082 if (rel->r_offset < 3
4083 || (rel->r_offset - 3 + 12) > input_section->size)
4084 goto corrupt_input;
4085 memcpy (contents + rel->r_offset - 3,
4086 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4087 }
4088 }
4089 else
4090 {
4091 if ((rel->r_offset + 4) >= input_section->size)
4092 goto corrupt_input;
4093 if (contents[rel->r_offset + 4] == 0xff)
4094 {
4095 if (rel->r_offset < 3
4096 || (rel->r_offset - 3 + 13) > input_section->size)
4097 goto corrupt_input;
4098 memcpy (contents + rel->r_offset - 3,
4099 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
4100 13);
4101 }
4102 else
4103 {
4104 if (rel->r_offset < 3
4105 || (rel->r_offset - 3 + 12) > input_section->size)
4106 goto corrupt_input;
4107 memcpy (contents + rel->r_offset - 3,
4108 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4109 }
4110 }
4111 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
4112 and R_X86_64_PLTOFF64. */
4113 rel++;
4114 wrel++;
4115 continue;
4116 }
4117
4118 if (htab->elf.sgot == NULL)
4119 abort ();
4120
4121 off = htab->tls_ld_or_ldm_got.offset;
4122 if (off & 1)
4123 off &= ~1;
4124 else
4125 {
4126 Elf_Internal_Rela outrel;
4127
4128 if (htab->elf.srelgot == NULL)
4129 abort ();
4130
4131 outrel.r_offset = (htab->elf.sgot->output_section->vma
4132 + htab->elf.sgot->output_offset + off);
4133
4134 bfd_put_64 (output_bfd, 0,
4135 htab->elf.sgot->contents + off);
4136 bfd_put_64 (output_bfd, 0,
4137 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4138 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4139 outrel.r_addend = 0;
4140 elf_append_rela (output_bfd, htab->elf.srelgot,
4141 &outrel);
4142 htab->tls_ld_or_ldm_got.offset |= 1;
4143 }
4144 relocation = htab->elf.sgot->output_section->vma
4145 + htab->elf.sgot->output_offset + off;
4146 unresolved_reloc = false;
4147 break;
4148
4149 case R_X86_64_DTPOFF32:
4150 if (!bfd_link_executable (info)
4151 || (input_section->flags & SEC_CODE) == 0)
4152 relocation -= _bfd_x86_elf_dtpoff_base (info);
4153 else
4154 relocation = elf_x86_64_tpoff (info, relocation);
4155 break;
4156
4157 case R_X86_64_TPOFF32:
4158 case R_X86_64_TPOFF64:
4159 BFD_ASSERT (bfd_link_executable (info));
4160 relocation = elf_x86_64_tpoff (info, relocation);
4161 break;
4162
4163 case R_X86_64_DTPOFF64:
4164 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4165 relocation -= _bfd_x86_elf_dtpoff_base (info);
4166 break;
4167
4168 default:
4169 break;
4170 }
4171
4172 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4173 because such sections are not SEC_ALLOC and thus ld.so will
4174 not process them. */
4175 if (unresolved_reloc
4176 && !((input_section->flags & SEC_DEBUGGING) != 0
4177 && h->def_dynamic)
4178 && _bfd_elf_section_offset (output_bfd, info, input_section,
4179 rel->r_offset) != (bfd_vma) -1)
4180 {
4181 switch (r_type)
4182 {
4183 case R_X86_64_32S:
4184 sec = h->root.u.def.section;
4185 if ((info->nocopyreloc || eh->def_protected)
4186 && !(h->root.u.def.section->flags & SEC_CODE))
4187 return elf_x86_64_need_pic (info, input_bfd, input_section,
4188 h, NULL, NULL, howto);
4189 /* Fall through. */
4190
4191 default:
4192 _bfd_error_handler
4193 /* xgettext:c-format */
4194 (_("%pB(%pA+%#" PRIx64 "): "
4195 "unresolvable %s relocation against symbol `%s'"),
4196 input_bfd,
4197 input_section,
4198 (uint64_t) rel->r_offset,
4199 howto->name,
4200 h->root.root.string);
4201 return false;
4202 }
4203 }
4204
4205 do_relocation:
4206 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4207 contents, rel->r_offset,
4208 relocation, rel->r_addend);
4209
4210 check_relocation_error:
4211 if (r != bfd_reloc_ok)
4212 {
4213 const char *name;
4214
4215 if (h != NULL)
4216 name = h->root.root.string;
4217 else
4218 {
4219 name = bfd_elf_string_from_elf_section (input_bfd,
4220 symtab_hdr->sh_link,
4221 sym->st_name);
4222 if (name == NULL)
4223 return false;
4224 if (*name == '\0')
4225 name = bfd_section_name (sec);
4226 }
4227
4228 if (r == bfd_reloc_overflow)
4229 {
4230 if (converted_reloc)
4231 {
4232 info->callbacks->einfo
4233 ("%X%H:", input_bfd, input_section, rel->r_offset);
4234 info->callbacks->einfo
4235 (_(" failed to convert GOTPCREL relocation against "
4236 "'%s'; relink with --no-relax\n"),
4237 name);
4238 status = false;
4239 continue;
4240 }
4241 (*info->callbacks->reloc_overflow)
4242 (info, (h ? &h->root : NULL), name, howto->name,
4243 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4244 }
4245 else
4246 {
4247 _bfd_error_handler
4248 /* xgettext:c-format */
4249 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4250 input_bfd, input_section,
4251 (uint64_t) rel->r_offset, name, (int) r);
4252 return false;
4253 }
4254 }
4255
4256 if (wrel != rel)
4257 *wrel = *rel;
4258 }
4259
4260 if (wrel != rel)
4261 {
4262 Elf_Internal_Shdr *rel_hdr;
4263 size_t deleted = rel - wrel;
4264
4265 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4266 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4267 if (rel_hdr->sh_size == 0)
4268 {
4269 /* It is too late to remove an empty reloc section. Leave
4270 one NONE reloc.
4271 ??? What is wrong with an empty section??? */
4272 rel_hdr->sh_size = rel_hdr->sh_entsize;
4273 deleted -= 1;
4274 }
4275 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4276 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4277 input_section->reloc_count -= deleted;
4278 }
4279
4280 return status;
4281 }
4282
4283 /* Finish up dynamic symbol handling. We set the contents of various
4284 dynamic sections here. */
4285
4286 static bool
4287 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4288 struct bfd_link_info *info,
4289 struct elf_link_hash_entry *h,
4290 Elf_Internal_Sym *sym)
4291 {
4292 struct elf_x86_link_hash_table *htab;
4293 bool use_plt_second;
4294 struct elf_x86_link_hash_entry *eh;
4295 bool local_undefweak;
4296
4297 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4298 if (htab == NULL)
4299 return false;
4300
4301 /* Use the second PLT section only if there is .plt section. */
4302 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4303
4304 eh = (struct elf_x86_link_hash_entry *) h;
4305 if (eh->no_finish_dynamic_symbol)
4306 abort ();
4307
4308 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4309 resolved undefined weak symbols in executable so that their
4310 references have value 0 at run-time. */
4311 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4312
4313 if (h->plt.offset != (bfd_vma) -1)
4314 {
4315 bfd_vma plt_index;
4316 bfd_vma got_offset, plt_offset;
4317 Elf_Internal_Rela rela;
4318 bfd_byte *loc;
4319 asection *plt, *gotplt, *relplt, *resolved_plt;
4320 const struct elf_backend_data *bed;
4321 bfd_vma plt_got_pcrel_offset;
4322
4323 /* When building a static executable, use .iplt, .igot.plt and
4324 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4325 if (htab->elf.splt != NULL)
4326 {
4327 plt = htab->elf.splt;
4328 gotplt = htab->elf.sgotplt;
4329 relplt = htab->elf.srelplt;
4330 }
4331 else
4332 {
4333 plt = htab->elf.iplt;
4334 gotplt = htab->elf.igotplt;
4335 relplt = htab->elf.irelplt;
4336 }
4337
4338 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4339
4340 /* Get the index in the procedure linkage table which
4341 corresponds to this symbol. This is the index of this symbol
4342 in all the symbols for which we are making plt entries. The
4343 first entry in the procedure linkage table is reserved.
4344
4345 Get the offset into the .got table of the entry that
4346 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4347 bytes. The first three are reserved for the dynamic linker.
4348
4349 For static executables, we don't reserve anything. */
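	  /* For instance, with the usual 16-byte lazy PLT entries,
	     8-byte GOT entries and a reserved PLT0 (a sketch; the real
	     sizes come from htab->plt), h->plt.offset == 32 maps to PLT
	     index 1 and got_offset == (1 + 3) * GOT_ENTRY_SIZE == 32.  */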
4350
4351 if (plt == htab->elf.splt)
4352 {
4353 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4354 - htab->plt.has_plt0);
4355 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4356 }
4357 else
4358 {
4359 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4360 got_offset = got_offset * GOT_ENTRY_SIZE;
4361 }
4362
4363 /* Fill in the entry in the procedure linkage table. */
4364 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4365 htab->plt.plt_entry_size);
4366 if (use_plt_second)
4367 {
4368 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4369 htab->non_lazy_plt->plt_entry,
4370 htab->non_lazy_plt->plt_entry_size);
4371
4372 resolved_plt = htab->plt_second;
4373 plt_offset = eh->plt_second.offset;
4374 }
4375 else
4376 {
4377 resolved_plt = plt;
4378 plt_offset = h->plt.offset;
4379 }
4380
4381 /* Insert the relocation positions of the plt section. */
4382
4383 /* Put in the offset for the PC-relative instruction referring to the
4384 GOT entry, subtracting the size of that instruction. */
4385 plt_got_pcrel_offset = (gotplt->output_section->vma
4386 + gotplt->output_offset
4387 + got_offset
4388 - resolved_plt->output_section->vma
4389 - resolved_plt->output_offset
4390 - plt_offset
4391 - htab->plt.plt_got_insn_size);
4392
4393 /* Check PC-relative offset overflow in PLT entry. */
4394 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4395 /* xgettext:c-format */
4396 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4397 output_bfd, h->root.root.string);
4398
4399 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4400 (resolved_plt->contents + plt_offset
4401 + htab->plt.plt_got_offset));
4402
4403 /* Fill in the entry in the global offset table; initially this
4404 points to the second part of the PLT entry. Leave the entry
4405 as zero for undefined weak symbol in PIE. No PLT relocation
4406 against undefined weak symbol in PIE. */
4407 if (!local_undefweak)
4408 {
4409 if (htab->plt.has_plt0)
4410 bfd_put_64 (output_bfd, (plt->output_section->vma
4411 + plt->output_offset
4412 + h->plt.offset
4413 + htab->lazy_plt->plt_lazy_offset),
4414 gotplt->contents + got_offset);
4415
4416 /* Fill in the entry in the .rela.plt section. */
4417 rela.r_offset = (gotplt->output_section->vma
4418 + gotplt->output_offset
4419 + got_offset);
4420 if (PLT_LOCAL_IFUNC_P (info, h))
4421 {
4422 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4423 h->root.root.string,
4424 h->root.u.def.section->owner);
4425
4426 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4427 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4428 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4429 rela.r_addend = (h->root.u.def.value
4430 + h->root.u.def.section->output_section->vma
4431 + h->root.u.def.section->output_offset);
4432
4433 if (htab->params->report_relative_reloc)
4434 _bfd_x86_elf_link_report_relative_reloc
4435 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
4436
4437 /* R_X86_64_IRELATIVE comes last. */
4438 plt_index = htab->next_irelative_index--;
4439 }
4440 else
4441 {
4442 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4443 rela.r_addend = 0;
4444 plt_index = htab->next_jump_slot_index++;
4445 }
4446
4447 /* Don't fill the second and third slots in the PLT entry for
4448 static executables or when PLT0 is not used. */
4449 if (plt == htab->elf.splt && htab->plt.has_plt0)
4450 {
4451 bfd_vma plt0_offset
4452 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4453
4454 /* Put relocation index. */
4455 bfd_put_32 (output_bfd, plt_index,
4456 (plt->contents + h->plt.offset
4457 + htab->lazy_plt->plt_reloc_offset));
4458
4459 /* Put offset for jmp .PLT0 and check for overflow. We don't
4460 check relocation index for overflow since branch displacement
4461 will overflow first. */
4462 if (plt0_offset > 0x80000000)
4463 /* xgettext:c-format */
4464 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4465 output_bfd, h->root.root.string);
4466 bfd_put_32 (output_bfd, - plt0_offset,
4467 (plt->contents + h->plt.offset
4468 + htab->lazy_plt->plt_plt_offset));
4469 }
4470
4471 bed = get_elf_backend_data (output_bfd);
4472 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4473 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4474 }
4475 }
4476 else if (eh->plt_got.offset != (bfd_vma) -1)
4477 {
4478 bfd_vma got_offset, plt_offset;
4479 asection *plt, *got;
4480 bool got_after_plt;
4481 int32_t got_pcrel_offset;
4482
4483 /* Set the entry in the GOT procedure linkage table. */
4484 plt = htab->plt_got;
4485 got = htab->elf.sgot;
4486 got_offset = h->got.offset;
4487
4488 if (got_offset == (bfd_vma) -1
4489 || (h->type == STT_GNU_IFUNC && h->def_regular)
4490 || plt == NULL
4491 || got == NULL)
4492 abort ();
4493
4494 /* Use the non-lazy PLT entry template for the GOT PLT since they
4495 are identical. */
4496 /* Fill in the entry in the GOT procedure linkage table. */
4497 plt_offset = eh->plt_got.offset;
4498 memcpy (plt->contents + plt_offset,
4499 htab->non_lazy_plt->plt_entry,
4500 htab->non_lazy_plt->plt_entry_size);
4501
4502 /* Put in the offset for the PC-relative instruction referring to the GOT
4503 entry, subtracting the size of that instruction. */
4504 got_pcrel_offset = (got->output_section->vma
4505 + got->output_offset
4506 + got_offset
4507 - plt->output_section->vma
4508 - plt->output_offset
4509 - plt_offset
4510 - htab->non_lazy_plt->plt_got_insn_size);
4511
4512 /* Check PC-relative offset overflow in GOT PLT entry. */
4513 got_after_plt = got->output_section->vma > plt->output_section->vma;
4514 if ((got_after_plt && got_pcrel_offset < 0)
4515 || (!got_after_plt && got_pcrel_offset > 0))
4516 /* xgettext:c-format */
4517 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4518 output_bfd, h->root.root.string);
4519
4520 bfd_put_32 (output_bfd, got_pcrel_offset,
4521 (plt->contents + plt_offset
4522 + htab->non_lazy_plt->plt_got_offset));
4523 }
4524
4525 if (!local_undefweak
4526 && !h->def_regular
4527 && (h->plt.offset != (bfd_vma) -1
4528 || eh->plt_got.offset != (bfd_vma) -1))
4529 {
4530 /* Mark the symbol as undefined, rather than as defined in
4531 the .plt section. Leave the value if there were any
4532 relocations where pointer equality matters (this is a clue
4533 for the dynamic linker, to make function pointer
4534 comparisons work between an application and shared
4535 library), otherwise set it to zero. If a function is only
4536 called from a binary, there is no need to slow down
4537 shared libraries because of that. */
4538 sym->st_shndx = SHN_UNDEF;
4539 if (!h->pointer_equality_needed)
4540 sym->st_value = 0;
4541 }
4542
4543 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4544
4545 /* Don't generate dynamic GOT relocation against undefined weak
4546 symbol in executable. */
4547 if (h->got.offset != (bfd_vma) -1
4548 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4549 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4550 && !local_undefweak)
4551 {
4552 Elf_Internal_Rela rela;
4553 asection *relgot = htab->elf.srelgot;
4554 const char *relative_reloc_name = NULL;
4555 bool generate_dynamic_reloc = true;
4556
4557 /* This symbol has an entry in the global offset table. Set it
4558 up. */
4559 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4560 abort ();
4561
4562 rela.r_offset = (htab->elf.sgot->output_section->vma
4563 + htab->elf.sgot->output_offset
4564 + (h->got.offset &~ (bfd_vma) 1));
4565
4566 /* If this is a static link, or it is a -Bsymbolic link and the
4567 symbol is defined locally or was forced to be local because
4568 of a version file, we just want to emit a RELATIVE reloc.
4569 The entry in the global offset table will already have been
4570 initialized in the relocate_section function. */
4571 if (h->def_regular
4572 && h->type == STT_GNU_IFUNC)
4573 {
4574 if (h->plt.offset == (bfd_vma) -1)
4575 {
4576 /* STT_GNU_IFUNC is referenced without PLT. */
4577 if (htab->elf.splt == NULL)
4578 {
4579 /* use .rel[a].iplt section to store .got relocations
4580 in static executable. */
4581 relgot = htab->elf.irelplt;
4582 }
4583 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4584 {
4585 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4586 h->root.root.string,
4587 h->root.u.def.section->owner);
4588
4589 rela.r_info = htab->r_info (0,
4590 R_X86_64_IRELATIVE);
4591 rela.r_addend = (h->root.u.def.value
4592 + h->root.u.def.section->output_section->vma
4593 + h->root.u.def.section->output_offset);
4594 relative_reloc_name = "R_X86_64_IRELATIVE";
4595 }
4596 else
4597 goto do_glob_dat;
4598 }
4599 else if (bfd_link_pic (info))
4600 {
4601 /* Generate R_X86_64_GLOB_DAT. */
4602 goto do_glob_dat;
4603 }
4604 else
4605 {
4606 asection *plt;
4607 bfd_vma plt_offset;
4608
4609 if (!h->pointer_equality_needed)
4610 abort ();
4611
4612 /* For non-shared object, we can't use .got.plt, which
4613 contains the real function address if we need pointer
4614 equality. We load the GOT entry with the PLT entry. */
4615 if (htab->plt_second != NULL)
4616 {
4617 plt = htab->plt_second;
4618 plt_offset = eh->plt_second.offset;
4619 }
4620 else
4621 {
4622 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4623 plt_offset = h->plt.offset;
4624 }
4625 bfd_put_64 (output_bfd, (plt->output_section->vma
4626 + plt->output_offset
4627 + plt_offset),
4628 htab->elf.sgot->contents + h->got.offset);
4629 return true;
4630 }
4631 }
4632 else if (bfd_link_pic (info)
4633 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4634 {
4635 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4636 return false;
4637 BFD_ASSERT((h->got.offset & 1) != 0);
4638 if (info->enable_dt_relr)
4639 generate_dynamic_reloc = false;
4640 else
4641 {
4642 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4643 rela.r_addend = (h->root.u.def.value
4644 + h->root.u.def.section->output_section->vma
4645 + h->root.u.def.section->output_offset);
4646 relative_reloc_name = "R_X86_64_RELATIVE";
4647 }
4648 }
4649 else
4650 {
4651 BFD_ASSERT((h->got.offset & 1) == 0);
4652 do_glob_dat:
4653 bfd_put_64 (output_bfd, (bfd_vma) 0,
4654 htab->elf.sgot->contents + h->got.offset);
4655 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4656 rela.r_addend = 0;
4657 }
4658
4659 if (generate_dynamic_reloc)
4660 {
4661 if (relative_reloc_name != NULL
4662 && htab->params->report_relative_reloc)
4663 _bfd_x86_elf_link_report_relative_reloc
4664 (info, relgot, h, sym, relative_reloc_name, &rela);
4665
4666 elf_append_rela (output_bfd, relgot, &rela);
4667 }
4668 }
4669
4670 if (h->needs_copy)
4671 {
4672 Elf_Internal_Rela rela;
4673 asection *s;
4674
4675 /* This symbol needs a copy reloc. Set it up. */
4676 VERIFY_COPY_RELOC (h, htab)
4677
4678 rela.r_offset = (h->root.u.def.value
4679 + h->root.u.def.section->output_section->vma
4680 + h->root.u.def.section->output_offset);
4681 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4682 rela.r_addend = 0;
4683 if (h->root.u.def.section == htab->elf.sdynrelro)
4684 s = htab->elf.sreldynrelro;
4685 else
4686 s = htab->elf.srelbss;
4687 elf_append_rela (output_bfd, s, &rela);
4688 }
4689
4690 return true;
4691 }
4692
4693 /* Finish up local dynamic symbol handling. We set the contents of
4694 various dynamic sections here. */
4695
4696 static int
4697 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4698 {
4699 struct elf_link_hash_entry *h
4700 = (struct elf_link_hash_entry *) *slot;
4701 struct bfd_link_info *info
4702 = (struct bfd_link_info *) inf;
4703
4704 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4705 info, h, NULL);
4706 }
4707
4708 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4709 here since an undefined weak symbol may not be dynamic and
4710 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4711
4712 static bool
4713 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4714 void *inf)
4715 {
4716 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4717 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4718
4719 if (h->root.type != bfd_link_hash_undefweak
4720 || h->dynindx != -1)
4721 return true;
4722
4723 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4724 info, h, NULL);
4725 }
4726
4727 /* Used to decide how to sort relocs in an optimal manner for the
4728 dynamic linker, before writing them out. */
4729
4730 static enum elf_reloc_type_class
4731 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4732 const asection *rel_sec ATTRIBUTE_UNUSED,
4733 const Elf_Internal_Rela *rela)
4734 {
4735 bfd *abfd = info->output_bfd;
4736 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4737 struct elf_x86_link_hash_table *htab
4738 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4739
4740 if (htab->elf.dynsym != NULL
4741 && htab->elf.dynsym->contents != NULL)
4742 {
4743 /* Check relocation against STT_GNU_IFUNC symbol if there are
4744 dynamic symbols. */
4745 unsigned long r_symndx = htab->r_sym (rela->r_info);
4746 if (r_symndx != STN_UNDEF)
4747 {
4748 Elf_Internal_Sym sym;
4749 if (!bed->s->swap_symbol_in (abfd,
4750 (htab->elf.dynsym->contents
4751 + r_symndx * bed->s->sizeof_sym),
4752 0, &sym))
4753 abort ();
4754
4755 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4756 return reloc_class_ifunc;
4757 }
4758 }
4759
4760 switch ((int) ELF32_R_TYPE (rela->r_info))
4761 {
4762 case R_X86_64_IRELATIVE:
4763 return reloc_class_ifunc;
4764 case R_X86_64_RELATIVE:
4765 case R_X86_64_RELATIVE64:
4766 return reloc_class_relative;
4767 case R_X86_64_JUMP_SLOT:
4768 return reloc_class_plt;
4769 case R_X86_64_COPY:
4770 return reloc_class_copy;
4771 default:
4772 return reloc_class_normal;
4773 }
4774 }
4775
4776 /* Finish up the dynamic sections. */
4777
4778 static bool
4779 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4780 struct bfd_link_info *info)
4781 {
4782 struct elf_x86_link_hash_table *htab;
4783
4784 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4785 if (htab == NULL)
4786 return false;
4787
4788 if (! htab->elf.dynamic_sections_created)
4789 return true;
4790
4791 if (htab->elf.splt && htab->elf.splt->size > 0)
4792 {
4793 if (bfd_is_abs_section (htab->elf.splt->output_section))
4794 {
4795 info->callbacks->einfo
4796 (_("%F%P: discarded output section: `%pA'\n"),
4797 htab->elf.splt);
4798 return false;
4799 }
4800
4801 elf_section_data (htab->elf.splt->output_section)
4802 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4803
4804 if (htab->plt.has_plt0)
4805 {
4806 /* Fill in the special first entry in the procedure linkage
4807 table. */
4808 memcpy (htab->elf.splt->contents,
4809 htab->lazy_plt->plt0_entry,
4810 htab->lazy_plt->plt0_entry_size);
4811 /* Add offset for pushq GOT+8(%rip), since the instruction
4812 uses 6 bytes, subtract this value. */
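	  /* A sketch of the classic lazy PLT0 (the layout actually used
	     comes from htab->lazy_plt):
	       ff 35 xx xx xx xx	pushq GOT+8(%rip)
	       ff 25 xx xx xx xx	jmpq  *GOT+16(%rip)
	       0f 1f 40 00		nopl  0x0(%rax)
	     Here the first displacement sits at offset 2 and its pushq
	     ends at offset 6, which is why 6 is subtracted below.  */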
4813 bfd_put_32 (output_bfd,
4814 (htab->elf.sgotplt->output_section->vma
4815 + htab->elf.sgotplt->output_offset
4816 + 8
4817 - htab->elf.splt->output_section->vma
4818 - htab->elf.splt->output_offset
4819 - 6),
4820 (htab->elf.splt->contents
4821 + htab->lazy_plt->plt0_got1_offset));
4822 /* Add offset for the PC-relative instruction accessing
4823 GOT+16, subtracting the offset to the end of that
4824 instruction. */
4825 bfd_put_32 (output_bfd,
4826 (htab->elf.sgotplt->output_section->vma
4827 + htab->elf.sgotplt->output_offset
4828 + 16
4829 - htab->elf.splt->output_section->vma
4830 - htab->elf.splt->output_offset
4831 - htab->lazy_plt->plt0_got2_insn_end),
4832 (htab->elf.splt->contents
4833 + htab->lazy_plt->plt0_got2_offset));
4834 }
4835
4836 if (htab->elf.tlsdesc_plt)
4837 {
4838 bfd_put_64 (output_bfd, (bfd_vma) 0,
4839 htab->elf.sgot->contents + htab->elf.tlsdesc_got);
4840
4841 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
4842 htab->lazy_plt->plt_tlsdesc_entry,
4843 htab->lazy_plt->plt_tlsdesc_entry_size);
4844
4845 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4846 bytes and the instruction uses 6 bytes, subtract these
4847 values. */
4848 bfd_put_32 (output_bfd,
4849 (htab->elf.sgotplt->output_section->vma
4850 + htab->elf.sgotplt->output_offset
4851 + 8
4852 - htab->elf.splt->output_section->vma
4853 - htab->elf.splt->output_offset
4854 - htab->elf.tlsdesc_plt
4855 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4856 (htab->elf.splt->contents
4857 + htab->elf.tlsdesc_plt
4858 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4859 /* Add offset for indirect branch via GOT+TDG, where TDG
4860 stands for htab->tlsdesc_got, subtracting the offset
4861 to the end of that instruction. */
4862 bfd_put_32 (output_bfd,
4863 (htab->elf.sgot->output_section->vma
4864 + htab->elf.sgot->output_offset
4865 + htab->elf.tlsdesc_got
4866 - htab->elf.splt->output_section->vma
4867 - htab->elf.splt->output_offset
4868 - htab->elf.tlsdesc_plt
4869 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4870 (htab->elf.splt->contents
4871 + htab->elf.tlsdesc_plt
4872 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4873 }
4874 }
4875
4876 /* Fill PLT entries for undefined weak symbols in PIE. */
4877 if (bfd_link_pie (info))
4878 bfd_hash_traverse (&info->hash->table,
4879 elf_x86_64_pie_finish_undefweak_symbol,
4880 info);
4881
4882 return true;
4883 }
4884
4885 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4886 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4887 It has to be done before elf_link_sort_relocs is called so that
4888 dynamic relocations are properly sorted. */
4889
4890 static bool
4891 elf_x86_64_output_arch_local_syms
4892 (bfd *output_bfd ATTRIBUTE_UNUSED,
4893 struct bfd_link_info *info,
4894 void *flaginfo ATTRIBUTE_UNUSED,
4895 int (*func) (void *, const char *,
4896 Elf_Internal_Sym *,
4897 asection *,
4898 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4899 {
4900 struct elf_x86_link_hash_table *htab
4901 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4902 if (htab == NULL)
4903 return false;
4904
4905 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4906 htab_traverse (htab->loc_hash_table,
4907 elf_x86_64_finish_local_dynamic_symbol,
4908 info);
4909
4910 return true;
4911 }
4912
4913 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4914 dynamic relocations. */
4915
4916 static long
4917 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4918 long symcount ATTRIBUTE_UNUSED,
4919 asymbol **syms ATTRIBUTE_UNUSED,
4920 long dynsymcount,
4921 asymbol **dynsyms,
4922 asymbol **ret)
4923 {
4924 long count, i, n;
4925 int j;
4926 bfd_byte *plt_contents;
4927 long relsize;
4928 const struct elf_x86_lazy_plt_layout *lazy_plt;
4929 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4930 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4931 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4932 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4933 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4934 const struct elf_x86_lazy_plt_layout *x32_lazy_ibt_plt;
4935 const struct elf_x86_non_lazy_plt_layout *x32_non_lazy_ibt_plt;
4936 asection *plt;
4937 enum elf_x86_plt_type plt_type;
4938 struct elf_x86_plt plts[] =
4939 {
4940 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4941 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4942 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4943 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4944 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4945 };
4946
4947 *ret = NULL;
4948
4949 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4950 return 0;
4951
4952 if (dynsymcount <= 0)
4953 return 0;
4954
4955 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4956 if (relsize <= 0)
4957 return -1;
4958
4959 lazy_plt = &elf_x86_64_lazy_plt;
4960 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4961 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4962 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4963 if (ABI_64_P (abfd))
4964 {
4965 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4966 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4967 x32_lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4968 x32_non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4969 }
4970 else
4971 {
4972 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4973 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4974 x32_lazy_ibt_plt = NULL;
4975 x32_non_lazy_ibt_plt = NULL;
4976 }
4977
4978 count = 0;
4979 for (j = 0; plts[j].name != NULL; j++)
4980 {
4981 plt = bfd_get_section_by_name (abfd, plts[j].name);
4982 if (plt == NULL
4983 || plt->size == 0
4984 || (plt->flags & SEC_HAS_CONTENTS) == 0)
4985 continue;
4986
4987 /* Get the PLT section contents. */
4988 if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
4989 break;
4990
4991 /* Check what kind of PLT it is. */
4992 plt_type = plt_unknown;
4993 if (plts[j].type == plt_unknown
4994 && (plt->size >= (lazy_plt->plt_entry_size
4995 + lazy_plt->plt_entry_size)))
4996 {
4997 /* Match lazy PLT first. Need to check the first two
4998 instructions. */
4999 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
5000 lazy_plt->plt0_got1_offset) == 0)
5001 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
5002 2) == 0))
5003 {
5004 if (x32_lazy_ibt_plt != NULL
5005 && (memcmp (plt_contents
5006 + x32_lazy_ibt_plt->plt_entry_size,
5007 x32_lazy_ibt_plt->plt_entry,
5008 x32_lazy_ibt_plt->plt_got_offset) == 0))
5009 {
5010 /* The first entry in the x32 lazy IBT PLT is the same
5011 as the lazy PLT. */
5012 plt_type = plt_lazy | plt_second;
5013 lazy_plt = x32_lazy_ibt_plt;
5014 }
5015 else
5016 plt_type = plt_lazy;
5017 }
5018 else if (lazy_bnd_plt != NULL
5019 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
5020 lazy_bnd_plt->plt0_got1_offset) == 0)
5021 && (memcmp (plt_contents + 6,
5022 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
5023 {
5024 plt_type = plt_lazy | plt_second;
5025 /* The first entry in the lazy IBT PLT is the same as the
5026 lazy BND PLT. */
5027 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
5028 lazy_ibt_plt->plt_entry,
5029 lazy_ibt_plt->plt_got_offset) == 0))
5030 lazy_plt = lazy_ibt_plt;
5031 else
5032 lazy_plt = lazy_bnd_plt;
5033 }
5034 }
5035
5036 if (non_lazy_plt != NULL
5037 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
5038 && plt->size >= non_lazy_plt->plt_entry_size)
5039 {
5040 /* Match non-lazy PLT. */
5041 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
5042 non_lazy_plt->plt_got_offset) == 0)
5043 plt_type = plt_non_lazy;
5044 }
5045
5046 if (plt_type == plt_unknown || plt_type == plt_second)
5047 {
5048 if (non_lazy_bnd_plt != NULL
5049 && plt->size >= non_lazy_bnd_plt->plt_entry_size
5050 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
5051 non_lazy_bnd_plt->plt_got_offset) == 0))
5052 {
5053 /* Match BND PLT. */
5054 plt_type = plt_second;
5055 non_lazy_plt = non_lazy_bnd_plt;
5056 }
5057 else if (non_lazy_ibt_plt != NULL
5058 && plt->size >= non_lazy_ibt_plt->plt_entry_size
5059 && (memcmp (plt_contents,
5060 non_lazy_ibt_plt->plt_entry,
5061 non_lazy_ibt_plt->plt_got_offset) == 0))
5062 {
5063 /* Match IBT PLT. */
5064 plt_type = plt_second;
5065 non_lazy_plt = non_lazy_ibt_plt;
5066 }
5067 else if (x32_non_lazy_ibt_plt != NULL
5068 && plt->size >= x32_non_lazy_ibt_plt->plt_entry_size
5069 && (memcmp (plt_contents,
5070 x32_non_lazy_ibt_plt->plt_entry,
5071 x32_non_lazy_ibt_plt->plt_got_offset) == 0))
5072 {
5073 /* Match x32 IBT PLT. */
5074 plt_type = plt_second;
5075 non_lazy_plt = x32_non_lazy_ibt_plt;
5076 }
5077 }
5078
5079 if (plt_type == plt_unknown)
5080 {
5081 free (plt_contents);
5082 continue;
5083 }
5084
5085 plts[j].sec = plt;
5086 plts[j].type = plt_type;
5087
5088 if ((plt_type & plt_lazy))
5089 {
5090 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
5091 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
5092 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
5093 /* Skip PLT0 in lazy PLT. */
5094 i = 1;
5095 }
5096 else
5097 {
5098 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
5099 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
5100 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
5101 i = 0;
5102 }
5103
5104 /* Skip lazy PLT when the second PLT is used. */
5105 if (plt_type == (plt_lazy | plt_second))
5106 plts[j].count = 0;
5107 else
5108 {
5109 n = plt->size / plts[j].plt_entry_size;
5110 plts[j].count = n;
5111 count += n - i;
5112 }
5113
5114 plts[j].contents = plt_contents;
5115 }
5116
5117 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
5118 (bfd_vma) 0, plts, dynsyms,
5119 ret);
5120 }
5121
5122 /* Handle an x86-64 specific section when reading an object file. This
5123 is called when elfcode.h finds a section with an unknown type. */
5124
5125 static bool
5126 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5127 const char *name, int shindex)
5128 {
5129 if (hdr->sh_type != SHT_X86_64_UNWIND)
5130 return false;
5131
5132 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5133 return false;
5134
5135 return true;
5136 }
5137
5138 /* Hook called by the linker routine which adds symbols from an object
5139 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5140 of .bss. */
5141
5142 static bool
5143 elf_x86_64_add_symbol_hook (bfd *abfd,
5144 struct bfd_link_info *info ATTRIBUTE_UNUSED,
5145 Elf_Internal_Sym *sym,
5146 const char **namep ATTRIBUTE_UNUSED,
5147 flagword *flagsp ATTRIBUTE_UNUSED,
5148 asection **secp,
5149 bfd_vma *valp)
5150 {
5151 asection *lcomm;
5152
5153 switch (sym->st_shndx)
5154 {
5155 case SHN_X86_64_LCOMMON:
5156 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5157 if (lcomm == NULL)
5158 {
5159 lcomm = bfd_make_section_with_flags (abfd,
5160 "LARGE_COMMON",
5161 (SEC_ALLOC
5162 | SEC_IS_COMMON
5163 | SEC_LINKER_CREATED));
5164 if (lcomm == NULL)
5165 return false;
5166 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5167 }
5168 *secp = lcomm;
5169 *valp = sym->st_size;
5170 return true;
5171 }
5172
5173 return true;
5174 }
5175
5176
5177 /* Given a BFD section, try to locate the corresponding ELF section
5178 index. */
5179
5180 static bool
5181 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5182 asection *sec, int *index_return)
5183 {
5184 if (sec == &_bfd_elf_large_com_section)
5185 {
5186 *index_return = SHN_X86_64_LCOMMON;
5187 return true;
5188 }
5189 return false;
5190 }
5191
5192 /* Process a symbol. */
5193
5194 static void
5195 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5196 asymbol *asym)
5197 {
5198 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5199
5200 switch (elfsym->internal_elf_sym.st_shndx)
5201 {
5202 case SHN_X86_64_LCOMMON:
5203 asym->section = &_bfd_elf_large_com_section;
5204 asym->value = elfsym->internal_elf_sym.st_size;
5205 /* Common symbol doesn't set BSF_GLOBAL. */
5206 asym->flags &= ~BSF_GLOBAL;
5207 break;
5208 }
5209 }
5210
5211 static bool
5212 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5213 {
5214 return (sym->st_shndx == SHN_COMMON
5215 || sym->st_shndx == SHN_X86_64_LCOMMON);
5216 }
5217
5218 static unsigned int
5219 elf_x86_64_common_section_index (asection *sec)
5220 {
5221 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5222 return SHN_COMMON;
5223 else
5224 return SHN_X86_64_LCOMMON;
5225 }
5226
5227 static asection *
5228 elf_x86_64_common_section (asection *sec)
5229 {
5230 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5231 return bfd_com_section_ptr;
5232 else
5233 return &_bfd_elf_large_com_section;
5234 }
5235
5236 static bool
5237 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5238 const Elf_Internal_Sym *sym,
5239 asection **psec,
5240 bool newdef,
5241 bool olddef,
5242 bfd *oldbfd,
5243 const asection *oldsec)
5244 {
5245 /* A normal common symbol and a large common symbol result in a
5246 normal common symbol. We turn the large common symbol into a
5247 normal one. */
5248 if (!olddef
5249 && h->root.type == bfd_link_hash_common
5250 && !newdef
5251 && bfd_is_com_section (*psec)
5252 && oldsec != *psec)
5253 {
5254 if (sym->st_shndx == SHN_COMMON
5255 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5256 {
5257 h->root.u.c.p->section
5258 = bfd_make_section_old_way (oldbfd, "COMMON");
5259 h->root.u.c.p->section->flags = SEC_ALLOC;
5260 }
5261 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5262 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5263 *psec = bfd_com_section_ptr;
5264 }
5265
5266 return true;
5267 }
5268
5269 static int
5270 elf_x86_64_additional_program_headers (bfd *abfd,
5271 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5272 {
5273 asection *s;
5274 int count = 0;
5275
5276 /* Check to see if we need a large readonly segment. */
5277 s = bfd_get_section_by_name (abfd, ".lrodata");
5278 if (s && (s->flags & SEC_LOAD))
5279 count++;
5280
5281 /* Check to see if we need a large data segment.  Since the .lbss section
5282 is placed right after the .bss section, there should be no need for
5283 a large data segment just because of .lbss. */
5284 s = bfd_get_section_by_name (abfd, ".ldata");
5285 if (s && (s->flags & SEC_LOAD))
5286 count++;
5287
5288 return count;
5289 }
5290
5291 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5292
5293 static bool
5294 elf_x86_64_relocs_compatible (const bfd_target *input,
5295 const bfd_target *output)
5296 {
5297 return ((xvec_get_elf_backend_data (input)->s->elfclass
5298 == xvec_get_elf_backend_data (output)->s->elfclass)
5299 && _bfd_elf_relocs_compatible (input, output));
5300 }
5301
5302 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5303 with GNU properties if found. Otherwise, return NULL. */
5304
5305 static bfd *
5306 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5307 {
5308 struct elf_x86_init_table init_table;
5309 const struct elf_backend_data *bed;
5310 struct elf_x86_link_hash_table *htab;
5311
5312 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5313 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5314 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5315 != (int) R_X86_64_GNU_VTINHERIT)
5316 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5317 != (int) R_X86_64_GNU_VTENTRY))
5318 abort ();
5319
5320 /* This is unused for x86-64. */
5321 init_table.plt0_pad_byte = 0x90;
5322
5323 bed = get_elf_backend_data (info->output_bfd);
5324 htab = elf_x86_hash_table (info, bed->target_id);
5325 if (!htab)
5326 abort ();
5327
5328 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5329 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5330
5331 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5332 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5333
5334 if (ABI_64_P (info->output_bfd))
5335 {
5336 init_table.sframe_lazy_plt = &elf_x86_64_sframe_plt;
5337 init_table.sframe_non_lazy_plt = &elf_x86_64_sframe_non_lazy_plt;
5338 init_table.sframe_lazy_ibt_plt = &elf_x86_64_sframe_plt;
5339 init_table.sframe_non_lazy_ibt_plt = &elf_x86_64_sframe_non_lazy_plt;
5340 }
5341 else
5342 {
5343 /* SFrame is not supported for non-AMD64. */
5344 init_table.sframe_lazy_plt = NULL;
5345 init_table.sframe_non_lazy_plt = NULL;
5346 }
5347
5348 if (ABI_64_P (info->output_bfd))
5349 {
5350 init_table.r_info = elf64_r_info;
5351 init_table.r_sym = elf64_r_sym;
5352 }
5353 else
5354 {
5355 init_table.r_info = elf32_r_info;
5356 init_table.r_sym = elf32_r_sym;
5357 }
5358
5359 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5360 }
5361
5362 static const struct bfd_elf_special_section
5363 elf_x86_64_special_sections[]=
5364 {
5365 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5366 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5367 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5368 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5369 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5370 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5371 { NULL, 0, 0, 0, 0 }
5372 };
5373
5374 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5375 #define TARGET_LITTLE_NAME "elf64-x86-64"
5376 #define ELF_ARCH bfd_arch_i386
5377 #define ELF_TARGET_ID X86_64_ELF_DATA
5378 #define ELF_MACHINE_CODE EM_X86_64
5379 #define ELF_MAXPAGESIZE 0x1000
5380 #define ELF_COMMONPAGESIZE 0x1000
5381
5382 #define elf_backend_can_gc_sections 1
5383 #define elf_backend_can_refcount 1
5384 #define elf_backend_want_got_plt 1
5385 #define elf_backend_plt_readonly 1
5386 #define elf_backend_want_plt_sym 0
5387 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5388 #define elf_backend_rela_normal 1
5389 #define elf_backend_plt_alignment 4
5390 #define elf_backend_caches_rawsize 1
5391 #define elf_backend_dtrel_excludes_plt 1
5392 #define elf_backend_want_dynrelro 1
5393
5394 #define elf_info_to_howto elf_x86_64_info_to_howto
5395
5396 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5397 #define bfd_elf64_bfd_reloc_name_lookup \
5398 elf_x86_64_reloc_name_lookup
5399
5400 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5401 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5402 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5403 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5404 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5405 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5406 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5407 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5408 #ifdef CORE_HEADER
5409 #define elf_backend_write_core_note elf_x86_64_write_core_note
5410 #endif
5411 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5412 #define elf_backend_relocate_section elf_x86_64_relocate_section
5413 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5414 #define elf_backend_object_p elf64_x86_64_elf_object_p
5415 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5416
5417 #define elf_backend_section_from_shdr \
5418 elf_x86_64_section_from_shdr
5419
5420 #define elf_backend_section_from_bfd_section \
5421 elf_x86_64_elf_section_from_bfd_section
5422 #define elf_backend_add_symbol_hook \
5423 elf_x86_64_add_symbol_hook
5424 #define elf_backend_symbol_processing \
5425 elf_x86_64_symbol_processing
5426 #define elf_backend_common_section_index \
5427 elf_x86_64_common_section_index
5428 #define elf_backend_common_section \
5429 elf_x86_64_common_section
5430 #define elf_backend_common_definition \
5431 elf_x86_64_common_definition
5432 #define elf_backend_merge_symbol \
5433 elf_x86_64_merge_symbol
5434 #define elf_backend_special_sections \
5435 elf_x86_64_special_sections
5436 #define elf_backend_additional_program_headers \
5437 elf_x86_64_additional_program_headers
5438 #define elf_backend_setup_gnu_properties \
5439 elf_x86_64_link_setup_gnu_properties
5440 #define elf_backend_hide_symbol \
5441 _bfd_x86_elf_hide_symbol
5442
5443 #undef elf64_bed
5444 #define elf64_bed elf64_x86_64_bed
5445
5446 #include "elf64-target.h"
5447
5448 /* CloudABI support. */
5449
5450 #undef TARGET_LITTLE_SYM
5451 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5452 #undef TARGET_LITTLE_NAME
5453 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5454
5455 #undef ELF_OSABI
5456 #define ELF_OSABI ELFOSABI_CLOUDABI
5457
5458 #undef elf64_bed
5459 #define elf64_bed elf64_x86_64_cloudabi_bed
5460
5461 #include "elf64-target.h"
5462
5463 /* FreeBSD support. */
5464
5465 #undef TARGET_LITTLE_SYM
5466 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5467 #undef TARGET_LITTLE_NAME
5468 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5469
5470 #undef ELF_OSABI
5471 #define ELF_OSABI ELFOSABI_FREEBSD
5472
5473 #undef elf64_bed
5474 #define elf64_bed elf64_x86_64_fbsd_bed
5475
5476 #include "elf64-target.h"
5477
5478 /* Solaris 2 support. */
5479
5480 #undef TARGET_LITTLE_SYM
5481 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5482 #undef TARGET_LITTLE_NAME
5483 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5484
5485 #undef ELF_TARGET_OS
5486 #define ELF_TARGET_OS is_solaris
5487
5488 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5489 objects won't be recognized. */
5490 #undef ELF_OSABI
5491
5492 #undef elf64_bed
5493 #define elf64_bed elf64_x86_64_sol2_bed
5494
5495 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5496 boundary. */
5497 #undef elf_backend_static_tls_alignment
5498 #define elf_backend_static_tls_alignment 16
5499
5500 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5501
5502 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5503 File, p.63. */
5504 #undef elf_backend_want_plt_sym
5505 #define elf_backend_want_plt_sym 1
5506
5507 #undef elf_backend_strtab_flags
5508 #define elf_backend_strtab_flags SHF_STRINGS
5509
5510 static bool
5511 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5512 bfd *obfd ATTRIBUTE_UNUSED,
5513 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5514 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5515 {
5516 /* PR 19938: FIXME: Need to add code for setting the sh_info
5517 and sh_link fields of Solaris specific section types. */
5518 return false;
5519 }
5520
5521 #undef elf_backend_copy_special_section_fields
5522 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5523
5524 #include "elf64-target.h"
5525
5526 /* Restore defaults. */
5527 #undef ELF_OSABI
5528 #undef elf_backend_static_tls_alignment
5529 #undef elf_backend_want_plt_sym
5530 #define elf_backend_want_plt_sym 0
5531 #undef elf_backend_strtab_flags
5532 #undef elf_backend_copy_special_section_fields
5533
5534 /* 32bit x86-64 support. */
5535
5536 #undef TARGET_LITTLE_SYM
5537 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5538 #undef TARGET_LITTLE_NAME
5539 #define TARGET_LITTLE_NAME "elf32-x86-64"
5540 #undef elf32_bed
5541 #define elf32_bed elf32_x86_64_bed
5542
5543 #undef ELF_ARCH
5544 #define ELF_ARCH bfd_arch_i386
5545
5546 #undef ELF_MACHINE_CODE
5547 #define ELF_MACHINE_CODE EM_X86_64
5548
5549 #undef ELF_TARGET_OS
5550 #undef ELF_OSABI
5551
5552 #define bfd_elf32_bfd_reloc_type_lookup \
5553 elf_x86_64_reloc_type_lookup
5554 #define bfd_elf32_bfd_reloc_name_lookup \
5555 elf_x86_64_reloc_name_lookup
5556 #define bfd_elf32_get_synthetic_symtab \
5557 elf_x86_64_get_synthetic_symtab
5558
5559 #undef elf_backend_object_p
5560 #define elf_backend_object_p \
5561 elf32_x86_64_elf_object_p
5562
5563 #undef elf_backend_bfd_from_remote_memory
5564 #define elf_backend_bfd_from_remote_memory \
5565 _bfd_elf32_bfd_from_remote_memory
5566
5567 #undef elf_backend_size_info
5568 #define elf_backend_size_info \
5569 _bfd_elf32_size_info
5570
5571 #include "elf32-target.h"