From a06ea96464a2928865beb2ac6f12deb0464bfcd7 Mon Sep 17 00:00:00 2001 From: Nick Clifton Date: Mon, 13 Aug 2012 14:52:54 +0000 Subject: [PATCH] Add support for 64-bit ARM architecture: AArch64 --- bfd/ChangeLog | 44 + bfd/Makefile.am | 4 + bfd/Makefile.in | 6 + bfd/archures.c | 4 + bfd/bfd-in.h | 26 + bfd/bfd-in2.h | 242 + bfd/config.bfd | 21 + bfd/configure | 2 + bfd/configure.in | 2 + bfd/cpu-aarch64.c | 121 + bfd/doc/Makefile.in | 4 +- bfd/elf-bfd.h | 3 +- bfd/elf64-aarch64.c | 7016 ++++++++++++++ bfd/libbfd.h | 57 + bfd/reloc.c | 274 +- bfd/targets.c | 4 + binutils/ChangeLog | 23 + binutils/MAINTAINERS | 1 + binutils/doc/Makefile.in | 2 +- binutils/readelf.c | 50 + binutils/testsuite/ChangeLog | 13 + binutils/testsuite/binutils-all/objdump.exp | 2 +- gas/ChangeLog | 24 + gas/Makefile.am | 2 + gas/Makefile.in | 17 + gas/NEWS | 2 + gas/config/tc-aarch64.c | 7349 +++++++++++++++ gas/config/tc-aarch64.h | 231 + gas/configure.tgt | 7 +- gas/doc/Makefile.am | 1 + gas/doc/Makefile.in | 14 +- gas/doc/all.texi | 1 + gas/doc/as.texinfo | 31 + gas/doc/c-aarch64.texi | 275 + gas/testsuite/ChangeLog | 124 + gas/testsuite/gas/aarch64/aarch64.exp | 7 + gas/testsuite/gas/aarch64/addsub.d | 2371 +++++ gas/testsuite/gas/aarch64/addsub.s | 224 + gas/testsuite/gas/aarch64/advsimd-across.d | 46 + gas/testsuite/gas/aarch64/advsimd-across.s | 46 + gas/testsuite/gas/aarch64/advsimd-misc.d | 35 + gas/testsuite/gas/aarch64/advsimd-misc.s | 32 + gas/testsuite/gas/aarch64/advsisd-copy.d | 67 + gas/testsuite/gas/aarch64/advsisd-copy.s | 39 + gas/testsuite/gas/aarch64/advsisd-misc.d | 17 + gas/testsuite/gas/aarch64/advsisd-misc.s | 36 + gas/testsuite/gas/aarch64/alias.d | 74 + gas/testsuite/gas/aarch64/alias.s | 101 + gas/testsuite/gas/aarch64/bitfield-alias.d | 2 + gas/testsuite/gas/aarch64/bitfield-alias.s | 112 + gas/testsuite/gas/aarch64/bitfield-bfm.d | 2 + gas/testsuite/gas/aarch64/bitfield-bfm.s | 154 + gas/testsuite/gas/aarch64/bitfield-dump | 118 + .../gas/aarch64/bitfield-no-aliases.d | 121 + gas/testsuite/gas/aarch64/crypto.d | 26 + gas/testsuite/gas/aarch64/crypto.s | 44 + gas/testsuite/gas/aarch64/diagnostic.d | 3 + gas/testsuite/gas/aarch64/diagnostic.l | 87 + gas/testsuite/gas/aarch64/diagnostic.s | 89 + gas/testsuite/gas/aarch64/floatdp2.d | 25 + gas/testsuite/gas/aarch64/floatdp2.s | 32 + gas/testsuite/gas/aarch64/fp_cvt_int.d | 829 ++ gas/testsuite/gas/aarch64/fp_cvt_int.s | 117 + gas/testsuite/gas/aarch64/illegal-2.d | 4 + gas/testsuite/gas/aarch64/illegal-2.l | 8 + gas/testsuite/gas/aarch64/illegal-2.s | 22 + gas/testsuite/gas/aarch64/illegal.d | 4 + gas/testsuite/gas/aarch64/illegal.l | 549 ++ gas/testsuite/gas/aarch64/illegal.s | 538 ++ gas/testsuite/gas/aarch64/inst-directive.d | 8 + gas/testsuite/gas/aarch64/inst-directive.s | 5 + gas/testsuite/gas/aarch64/int-insns.d | 80 + gas/testsuite/gas/aarch64/int-insns.s | 151 + gas/testsuite/gas/aarch64/ldst-exclusive.d | 103 + gas/testsuite/gas/aarch64/ldst-exclusive.s | 139 + .../gas/aarch64/ldst-reg-imm-post-ind.d | 214 + .../gas/aarch64/ldst-reg-imm-post-ind.s | 62 + .../gas/aarch64/ldst-reg-imm-pre-ind.d | 214 + .../gas/aarch64/ldst-reg-imm-pre-ind.s | 62 + gas/testsuite/gas/aarch64/ldst-reg-pair.d | 265 + gas/testsuite/gas/aarch64/ldst-reg-pair.s | 111 + .../gas/aarch64/ldst-reg-reg-offset.d | 87 + .../gas/aarch64/ldst-reg-reg-offset.s | 88 + gas/testsuite/gas/aarch64/ldst-reg-uns-imm.d | 260 + gas/testsuite/gas/aarch64/ldst-reg-uns-imm.s | 102 + .../gas/aarch64/ldst-reg-unscaled-imm.d | 237 + .../gas/aarch64/ldst-reg-unscaled-imm.s | 82 + 
gas/testsuite/gas/aarch64/legacy_reg_names.d | 2 + gas/testsuite/gas/aarch64/legacy_reg_names.l | 4 + gas/testsuite/gas/aarch64/legacy_reg_names.s | 7 + gas/testsuite/gas/aarch64/mapmisc.d | 96 + gas/testsuite/gas/aarch64/mapmisc.dat | 1 + gas/testsuite/gas/aarch64/mapmisc.s | 40 + gas/testsuite/gas/aarch64/mapping.d | 20 + gas/testsuite/gas/aarch64/mapping.s | 16 + gas/testsuite/gas/aarch64/mapping2.d | 17 + gas/testsuite/gas/aarch64/mapping2.s | 13 + gas/testsuite/gas/aarch64/mapping3.d | 15 + gas/testsuite/gas/aarch64/mapping3.s | 3 + gas/testsuite/gas/aarch64/mapping4.d | 14 + gas/testsuite/gas/aarch64/mapping4.s | 7 + gas/testsuite/gas/aarch64/mov-no-aliases.d | 27 + gas/testsuite/gas/aarch64/mov.d | 26 + gas/testsuite/gas/aarch64/mov.s | 40 + gas/testsuite/gas/aarch64/movi.d | 8203 +++++++++++++++++ gas/testsuite/gas/aarch64/movi.s | 104 + gas/testsuite/gas/aarch64/msr.d | 15 + gas/testsuite/gas/aarch64/msr.s | 33 + gas/testsuite/gas/aarch64/neon-fp-cvt-int.d | 963 ++ gas/testsuite/gas/aarch64/neon-fp-cvt-int.s | 122 + gas/testsuite/gas/aarch64/neon-frint.d | 28 + gas/testsuite/gas/aarch64/neon-frint.s | 13 + gas/testsuite/gas/aarch64/neon-ins.d | 3607 ++++++++ gas/testsuite/gas/aarch64/neon-ins.s | 48 + gas/testsuite/gas/aarch64/neon-not.d | 131 + gas/testsuite/gas/aarch64/neon-not.s | 13 + .../gas/aarch64/neon-vfp-reglist-post.d | 347 + .../gas/aarch64/neon-vfp-reglist-post.s | 196 + gas/testsuite/gas/aarch64/neon-vfp-reglist.d | 193 + gas/testsuite/gas/aarch64/neon-vfp-reglist.s | 83 + gas/testsuite/gas/aarch64/no-aliases.d | 75 + gas/testsuite/gas/aarch64/optional.d | 34 + gas/testsuite/gas/aarch64/optional.s | 30 + .../gas/aarch64/programmer-friendly.d | 22 + .../gas/aarch64/programmer-friendly.s | 54 + gas/testsuite/gas/aarch64/reloc-data.d | 34 + gas/testsuite/gas/aarch64/reloc-data.s | 29 + gas/testsuite/gas/aarch64/reloc-insn.d | 157 + gas/testsuite/gas/aarch64/reloc-insn.s | 203 + gas/testsuite/gas/aarch64/shifted.d | 734 ++ gas/testsuite/gas/aarch64/shifted.s | 170 + gas/testsuite/gas/aarch64/symbol.d | 14 + gas/testsuite/gas/aarch64/symbol.s | 25 + gas/testsuite/gas/aarch64/sysreg-1.d | 4273 +++++++++ gas/testsuite/gas/aarch64/sysreg-1.s | 160 + gas/testsuite/gas/aarch64/sysreg.d | 25 + gas/testsuite/gas/aarch64/sysreg.s | 24 + gas/testsuite/gas/aarch64/system.d | 352 + gas/testsuite/gas/aarch64/system.s | 62 + gas/testsuite/gas/aarch64/tlbi_op.d | 39 + gas/testsuite/gas/aarch64/tlbi_op.s | 45 + gas/testsuite/gas/aarch64/tls.d | 33 + gas/testsuite/gas/aarch64/tls.s | 53 + gas/testsuite/gas/aarch64/verbose-error.d | 4 + gas/testsuite/gas/aarch64/verbose-error.l | 45 + gas/testsuite/gas/aarch64/verbose-error.s | 16 + include/ChangeLog | 15 + include/dis-asm.h | 3 + include/elf/ChangeLog | 15 + include/elf/aarch64.h | 214 + include/elf/common.h | 4 +- include/opcode/ChangeLog | 13 + include/opcode/aarch64.h | 928 ++ ld/ChangeLog | 21 + ld/Makefile.am | 20 + ld/Makefile.in | 24 + ld/NEWS | 2 + ld/configure.tgt | 8 + ld/emulparams/aarch64elf.sh | 35 + ld/emulparams/aarch64elfb.sh | 2 + ld/emulparams/aarch64linux.sh | 36 + ld/emulparams/aarch64linuxb.sh | 2 + ld/emultempl/aarch64elf.em | 415 + ld/testsuite/ChangeLog | 139 + ld/testsuite/ld-aarch64/aarch64-elf.exp | 105 + ld/testsuite/ld-aarch64/aarch64.ld | 19 + ld/testsuite/ld-aarch64/eh-frame-bar.s | 38 + ld/testsuite/ld-aarch64/eh-frame-foo.s | 55 + ld/testsuite/ld-aarch64/eh-frame.d | 86 + ld/testsuite/ld-aarch64/emit-relocs-257-be.d | 16 + ld/testsuite/ld-aarch64/emit-relocs-257.d | 16 + 
ld/testsuite/ld-aarch64/emit-relocs-257.s | 12 + ld/testsuite/ld-aarch64/emit-relocs-260-be.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-260.d | 16 + ld/testsuite/ld-aarch64/emit-relocs-260.s | 13 + ld/testsuite/ld-aarch64/emit-relocs-262.d | 16 + ld/testsuite/ld-aarch64/emit-relocs-262.s | 13 + ld/testsuite/ld-aarch64/emit-relocs-263.d | 15 + ld/testsuite/ld-aarch64/emit-relocs-263.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-264.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-264.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-265.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-265.s | 10 + ld/testsuite/ld-aarch64/emit-relocs-266.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-266.s | 10 + ld/testsuite/ld-aarch64/emit-relocs-267.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-267.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-268.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-268.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-269.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-269.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-270-bad.d | 15 + ld/testsuite/ld-aarch64/emit-relocs-270.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-270.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-271.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-271.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-272.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-272.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-273.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-273.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-274.d | 12 + ld/testsuite/ld-aarch64/emit-relocs-274.s | 5 + ld/testsuite/ld-aarch64/emit-relocs-275.d | 15 + ld/testsuite/ld-aarch64/emit-relocs-275.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-276.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-276.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-277.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-277.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-278.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-278.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-279-bad.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-279.d | 17 + ld/testsuite/ld-aarch64/emit-relocs-279.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-280.d | 12 + ld/testsuite/ld-aarch64/emit-relocs-280.s | 6 + ld/testsuite/ld-aarch64/emit-relocs-282.d | 12 + ld/testsuite/ld-aarch64/emit-relocs-282.s | 6 + ld/testsuite/ld-aarch64/emit-relocs-283.d | 12 + ld/testsuite/ld-aarch64/emit-relocs-283.s | 6 + ld/testsuite/ld-aarch64/emit-relocs-284.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-284.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-285.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-285.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-286-bad.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-286.d | 13 + ld/testsuite/ld-aarch64/emit-relocs-286.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-287.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-287.s | 9 + ld/testsuite/ld-aarch64/emit-relocs-299.d | 12 + ld/testsuite/ld-aarch64/emit-relocs-299.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-311.d | 14 + ld/testsuite/ld-aarch64/emit-relocs-311.s | 8 + ld/testsuite/ld-aarch64/emit-relocs-312.d | 19 + ld/testsuite/ld-aarch64/emit-relocs-312.s | 13 + ld/testsuite/ld-aarch64/emit-relocs1.s | 6 + .../ld-aarch64/farcall-b-none-function.d | 5 + .../ld-aarch64/farcall-b-none-function.s | 16 + ld/testsuite/ld-aarch64/farcall-b.d | 22 + ld/testsuite/ld-aarch64/farcall-b.s | 17 + ld/testsuite/ld-aarch64/farcall-back.d | 72 + ld/testsuite/ld-aarch64/farcall-back.s | 42 + .../ld-aarch64/farcall-bl-none-function.d | 5 + .../ld-aarch64/farcall-bl-none-function.s | 16 + ld/testsuite/ld-aarch64/farcall-bl.d | 23 + 
ld/testsuite/ld-aarch64/farcall-bl.s | 17 + ld/testsuite/ld-aarch64/farcall-section.d | 5 + ld/testsuite/ld-aarch64/farcall-section.s | 19 + ld/testsuite/ld-aarch64/limit-b.d | 17 + ld/testsuite/ld-aarch64/limit-b.s | 19 + ld/testsuite/ld-aarch64/limit-bl.d | 17 + ld/testsuite/ld-aarch64/limit-bl.s | 19 + ld/testsuite/ld-aarch64/relocs.ld | 19 + ld/testsuite/ld-aarch64/tls-desc-ie.d | 36 + ld/testsuite/ld-aarch64/tls-desc-ie.s | 32 + ld/testsuite/ld-aarch64/tls-relax-all.d | 39 + ld/testsuite/ld-aarch64/tls-relax-all.s | 51 + ld/testsuite/ld-aarch64/tls-relax-gd-ie.d | 9 + ld/testsuite/ld-aarch64/tls-relax-gd-ie.s | 10 + ld/testsuite/ld-aarch64/tls-relax-gd-le.d | 9 + ld/testsuite/ld-aarch64/tls-relax-gd-le.s | 9 + .../ld-aarch64/tls-relax-gdesc-ie-2.d | 18 + .../ld-aarch64/tls-relax-gdesc-ie-2.s | 24 + ld/testsuite/ld-aarch64/tls-relax-gdesc-ie.d | 11 + ld/testsuite/ld-aarch64/tls-relax-gdesc-ie.s | 13 + .../ld-aarch64/tls-relax-gdesc-le-2.d | 18 + .../ld-aarch64/tls-relax-gdesc-le-2.s | 22 + ld/testsuite/ld-aarch64/tls-relax-gdesc-le.d | 11 + ld/testsuite/ld-aarch64/tls-relax-gdesc-le.s | 12 + ld/testsuite/ld-aarch64/tls-relax-ie-le-2.d | 17 + ld/testsuite/ld-aarch64/tls-relax-ie-le-2.s | 20 + ld/testsuite/ld-aarch64/tls-relax-ie-le-3.d | 9 + ld/testsuite/ld-aarch64/tls-relax-ie-le-3.s | 12 + ld/testsuite/ld-aarch64/tls-relax-ie-le.d | 9 + ld/testsuite/ld-aarch64/tls-relax-ie-le.s | 9 + ld/testsuite/ld-aarch64/weak-undefined.d | 18 + ld/testsuite/ld-aarch64/weak-undefined.s | 18 + ld/testsuite/ld-elf/binutils.exp | 14 +- ld/testsuite/ld-elf/group8a.d | 2 +- ld/testsuite/ld-elf/group8b.d | 2 +- ld/testsuite/ld-elf/group9a.d | 2 +- ld/testsuite/ld-elf/group9b.d | 2 +- ld/testsuite/ld-elf/pr12851.d | 2 +- ld/testsuite/ld-elf/pr12975.d | 2 +- ld/testsuite/ld-elf/pr13177.d | 2 +- ld/testsuite/ld-elf/pr13195.d | 2 +- ld/testsuite/ld-elfvers/vers.exp | 1 + ld/testsuite/ld-gc/gc.exp | 6 + ld/testsuite/ld-shared/shared.exp | 1 + ld/testsuite/ld-srec/srec.exp | 7 +- ld/testsuite/lib/ld-lib.exp | 3 +- opcodes/ChangeLog | 29 + opcodes/Makefile.am | 31 +- opcodes/Makefile.in | 37 +- opcodes/aarch64-asm-2.c | 345 + opcodes/aarch64-asm.c | 1268 +++ opcodes/aarch64-asm.h | 73 + opcodes/aarch64-dis-2.c | 7655 +++++++++++++++ opcodes/aarch64-dis.c | 2392 +++++ opcodes/aarch64-dis.h | 94 + opcodes/aarch64-gen.c | 1317 +++ opcodes/aarch64-opc-2.c | 195 + opcodes/aarch64-opc.c | 3074 ++++++ opcodes/aarch64-opc.h | 392 + opcodes/aarch64-tbl.h | 2253 +++++ opcodes/configure | 1 + opcodes/configure.in | 1 + opcodes/disassemble.c | 15 + 307 files changed, 68333 insertions(+), 39 deletions(-) create mode 100644 bfd/cpu-aarch64.c create mode 100644 bfd/elf64-aarch64.c create mode 100644 gas/config/tc-aarch64.c create mode 100644 gas/config/tc-aarch64.h create mode 100644 gas/doc/c-aarch64.texi create mode 100644 gas/testsuite/gas/aarch64/aarch64.exp create mode 100644 gas/testsuite/gas/aarch64/addsub.d create mode 100644 gas/testsuite/gas/aarch64/addsub.s create mode 100644 gas/testsuite/gas/aarch64/advsimd-across.d create mode 100644 gas/testsuite/gas/aarch64/advsimd-across.s create mode 100644 gas/testsuite/gas/aarch64/advsimd-misc.d create mode 100644 gas/testsuite/gas/aarch64/advsimd-misc.s create mode 100644 gas/testsuite/gas/aarch64/advsisd-copy.d create mode 100644 gas/testsuite/gas/aarch64/advsisd-copy.s create mode 100644 gas/testsuite/gas/aarch64/advsisd-misc.d create mode 100644 gas/testsuite/gas/aarch64/advsisd-misc.s create mode 100644 gas/testsuite/gas/aarch64/alias.d create mode 100644 
gas/testsuite/gas/aarch64/alias.s create mode 100644 gas/testsuite/gas/aarch64/bitfield-alias.d create mode 100644 gas/testsuite/gas/aarch64/bitfield-alias.s create mode 100644 gas/testsuite/gas/aarch64/bitfield-bfm.d create mode 100644 gas/testsuite/gas/aarch64/bitfield-bfm.s create mode 100644 gas/testsuite/gas/aarch64/bitfield-dump create mode 100644 gas/testsuite/gas/aarch64/bitfield-no-aliases.d create mode 100644 gas/testsuite/gas/aarch64/crypto.d create mode 100644 gas/testsuite/gas/aarch64/crypto.s create mode 100644 gas/testsuite/gas/aarch64/diagnostic.d create mode 100644 gas/testsuite/gas/aarch64/diagnostic.l create mode 100644 gas/testsuite/gas/aarch64/diagnostic.s create mode 100644 gas/testsuite/gas/aarch64/floatdp2.d create mode 100644 gas/testsuite/gas/aarch64/floatdp2.s create mode 100644 gas/testsuite/gas/aarch64/fp_cvt_int.d create mode 100644 gas/testsuite/gas/aarch64/fp_cvt_int.s create mode 100644 gas/testsuite/gas/aarch64/illegal-2.d create mode 100644 gas/testsuite/gas/aarch64/illegal-2.l create mode 100644 gas/testsuite/gas/aarch64/illegal-2.s create mode 100644 gas/testsuite/gas/aarch64/illegal.d create mode 100644 gas/testsuite/gas/aarch64/illegal.l create mode 100644 gas/testsuite/gas/aarch64/illegal.s create mode 100644 gas/testsuite/gas/aarch64/inst-directive.d create mode 100644 gas/testsuite/gas/aarch64/inst-directive.s create mode 100644 gas/testsuite/gas/aarch64/int-insns.d create mode 100644 gas/testsuite/gas/aarch64/int-insns.s create mode 100644 gas/testsuite/gas/aarch64/ldst-exclusive.d create mode 100644 gas/testsuite/gas/aarch64/ldst-exclusive.s create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-imm-post-ind.d create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-imm-post-ind.s create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-imm-pre-ind.d create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-imm-pre-ind.s create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-pair.d create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-pair.s create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-reg-offset.d create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-reg-offset.s create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-uns-imm.d create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-uns-imm.s create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-unscaled-imm.d create mode 100644 gas/testsuite/gas/aarch64/ldst-reg-unscaled-imm.s create mode 100644 gas/testsuite/gas/aarch64/legacy_reg_names.d create mode 100644 gas/testsuite/gas/aarch64/legacy_reg_names.l create mode 100644 gas/testsuite/gas/aarch64/legacy_reg_names.s create mode 100644 gas/testsuite/gas/aarch64/mapmisc.d create mode 100644 gas/testsuite/gas/aarch64/mapmisc.dat create mode 100644 gas/testsuite/gas/aarch64/mapmisc.s create mode 100644 gas/testsuite/gas/aarch64/mapping.d create mode 100644 gas/testsuite/gas/aarch64/mapping.s create mode 100644 gas/testsuite/gas/aarch64/mapping2.d create mode 100644 gas/testsuite/gas/aarch64/mapping2.s create mode 100644 gas/testsuite/gas/aarch64/mapping3.d create mode 100644 gas/testsuite/gas/aarch64/mapping3.s create mode 100644 gas/testsuite/gas/aarch64/mapping4.d create mode 100644 gas/testsuite/gas/aarch64/mapping4.s create mode 100644 gas/testsuite/gas/aarch64/mov-no-aliases.d create mode 100644 gas/testsuite/gas/aarch64/mov.d create mode 100644 gas/testsuite/gas/aarch64/mov.s create mode 100644 gas/testsuite/gas/aarch64/movi.d create mode 100644 gas/testsuite/gas/aarch64/movi.s create mode 100644 gas/testsuite/gas/aarch64/msr.d create mode 100644 
gas/testsuite/gas/aarch64/msr.s create mode 100644 gas/testsuite/gas/aarch64/neon-fp-cvt-int.d create mode 100644 gas/testsuite/gas/aarch64/neon-fp-cvt-int.s create mode 100644 gas/testsuite/gas/aarch64/neon-frint.d create mode 100644 gas/testsuite/gas/aarch64/neon-frint.s create mode 100644 gas/testsuite/gas/aarch64/neon-ins.d create mode 100644 gas/testsuite/gas/aarch64/neon-ins.s create mode 100644 gas/testsuite/gas/aarch64/neon-not.d create mode 100644 gas/testsuite/gas/aarch64/neon-not.s create mode 100644 gas/testsuite/gas/aarch64/neon-vfp-reglist-post.d create mode 100644 gas/testsuite/gas/aarch64/neon-vfp-reglist-post.s create mode 100644 gas/testsuite/gas/aarch64/neon-vfp-reglist.d create mode 100644 gas/testsuite/gas/aarch64/neon-vfp-reglist.s create mode 100644 gas/testsuite/gas/aarch64/no-aliases.d create mode 100644 gas/testsuite/gas/aarch64/optional.d create mode 100644 gas/testsuite/gas/aarch64/optional.s create mode 100644 gas/testsuite/gas/aarch64/programmer-friendly.d create mode 100644 gas/testsuite/gas/aarch64/programmer-friendly.s create mode 100644 gas/testsuite/gas/aarch64/reloc-data.d create mode 100644 gas/testsuite/gas/aarch64/reloc-data.s create mode 100644 gas/testsuite/gas/aarch64/reloc-insn.d create mode 100644 gas/testsuite/gas/aarch64/reloc-insn.s create mode 100644 gas/testsuite/gas/aarch64/shifted.d create mode 100644 gas/testsuite/gas/aarch64/shifted.s create mode 100644 gas/testsuite/gas/aarch64/symbol.d create mode 100644 gas/testsuite/gas/aarch64/symbol.s create mode 100644 gas/testsuite/gas/aarch64/sysreg-1.d create mode 100644 gas/testsuite/gas/aarch64/sysreg-1.s create mode 100644 gas/testsuite/gas/aarch64/sysreg.d create mode 100644 gas/testsuite/gas/aarch64/sysreg.s create mode 100644 gas/testsuite/gas/aarch64/system.d create mode 100644 gas/testsuite/gas/aarch64/system.s create mode 100644 gas/testsuite/gas/aarch64/tlbi_op.d create mode 100644 gas/testsuite/gas/aarch64/tlbi_op.s create mode 100644 gas/testsuite/gas/aarch64/tls.d create mode 100644 gas/testsuite/gas/aarch64/tls.s create mode 100644 gas/testsuite/gas/aarch64/verbose-error.d create mode 100644 gas/testsuite/gas/aarch64/verbose-error.l create mode 100644 gas/testsuite/gas/aarch64/verbose-error.s create mode 100644 include/elf/aarch64.h create mode 100644 include/opcode/aarch64.h create mode 100644 ld/emulparams/aarch64elf.sh create mode 100644 ld/emulparams/aarch64elfb.sh create mode 100644 ld/emulparams/aarch64linux.sh create mode 100644 ld/emulparams/aarch64linuxb.sh create mode 100644 ld/emultempl/aarch64elf.em create mode 100644 ld/testsuite/ld-aarch64/aarch64-elf.exp create mode 100644 ld/testsuite/ld-aarch64/aarch64.ld create mode 100644 ld/testsuite/ld-aarch64/eh-frame-bar.s create mode 100644 ld/testsuite/ld-aarch64/eh-frame-foo.s create mode 100644 ld/testsuite/ld-aarch64/eh-frame.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-257-be.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-257.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-257.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-260-be.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-260.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-260.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-262.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-262.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-263.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-263.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-264.d create mode 100644 
ld/testsuite/ld-aarch64/emit-relocs-264.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-265.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-265.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-266.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-266.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-267.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-267.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-268.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-268.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-269.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-269.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-270-bad.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-270.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-270.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-271.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-271.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-272.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-272.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-273.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-273.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-274.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-274.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-275.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-275.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-276.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-276.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-277.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-277.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-278.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-278.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-279-bad.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-279.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-279.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-280.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-280.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-282.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-282.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-283.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-283.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-284.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-284.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-285.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-285.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-286-bad.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-286.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-286.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-287.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-287.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-299.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-299.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-311.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-311.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-312.d create mode 100644 ld/testsuite/ld-aarch64/emit-relocs-312.s create mode 100644 ld/testsuite/ld-aarch64/emit-relocs1.s create mode 100644 ld/testsuite/ld-aarch64/farcall-b-none-function.d create mode 100644 ld/testsuite/ld-aarch64/farcall-b-none-function.s create mode 100644 ld/testsuite/ld-aarch64/farcall-b.d create mode 
100644 ld/testsuite/ld-aarch64/farcall-b.s create mode 100644 ld/testsuite/ld-aarch64/farcall-back.d create mode 100644 ld/testsuite/ld-aarch64/farcall-back.s create mode 100644 ld/testsuite/ld-aarch64/farcall-bl-none-function.d create mode 100644 ld/testsuite/ld-aarch64/farcall-bl-none-function.s create mode 100644 ld/testsuite/ld-aarch64/farcall-bl.d create mode 100644 ld/testsuite/ld-aarch64/farcall-bl.s create mode 100644 ld/testsuite/ld-aarch64/farcall-section.d create mode 100644 ld/testsuite/ld-aarch64/farcall-section.s create mode 100644 ld/testsuite/ld-aarch64/limit-b.d create mode 100644 ld/testsuite/ld-aarch64/limit-b.s create mode 100644 ld/testsuite/ld-aarch64/limit-bl.d create mode 100644 ld/testsuite/ld-aarch64/limit-bl.s create mode 100644 ld/testsuite/ld-aarch64/relocs.ld create mode 100644 ld/testsuite/ld-aarch64/tls-desc-ie.d create mode 100644 ld/testsuite/ld-aarch64/tls-desc-ie.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-all.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-all.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gd-ie.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gd-ie.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gd-le.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gd-le.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-ie-2.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-ie-2.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-ie.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-ie.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-le-2.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-le-2.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-le.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-gdesc-le.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-ie-le-2.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-ie-le-2.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-ie-le-3.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-ie-le-3.s create mode 100644 ld/testsuite/ld-aarch64/tls-relax-ie-le.d create mode 100644 ld/testsuite/ld-aarch64/tls-relax-ie-le.s create mode 100644 ld/testsuite/ld-aarch64/weak-undefined.d create mode 100644 ld/testsuite/ld-aarch64/weak-undefined.s create mode 100644 opcodes/aarch64-asm-2.c create mode 100644 opcodes/aarch64-asm.c create mode 100644 opcodes/aarch64-asm.h create mode 100644 opcodes/aarch64-dis-2.c create mode 100644 opcodes/aarch64-dis.c create mode 100644 opcodes/aarch64-dis.h create mode 100644 opcodes/aarch64-gen.c create mode 100644 opcodes/aarch64-opc-2.c create mode 100644 opcodes/aarch64-opc.c create mode 100644 opcodes/aarch64-opc.h create mode 100644 opcodes/aarch64-tbl.h
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index 02cac165779..60ba3824ed3 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,3 +1,47 @@
+2012-08-13  Ian Bolton
+            Laurent Desnogues
+            Jim MacArthur
+            Marcus Shawcroft
+            Nigel Stephens
+            Ramana Radhakrishnan
+            Richard Earnshaw
+            Sofiane Naci
+            Tejas Belagod
+            Yufeng Zhang
+
+        * Makefile.am: Add AArch64 files.
+        * Makefile.in: Regenerate.
+        * archures.c (bfd_aarch64_arch): New declaration.
+        (bfd_archures_list): Add bfd_aarch64_arch.
+        * bfd-in.h (bfd_elf64_aarch64_init_maps): New declaration.
+        (bfd_aarch64_process_before_allocation): New declaration.
+        (bfd_elf64_aarch64_process_before_allocation): New declaration.
+        (bfd_elf64_aarch64_set_options): New declaration.
+        (bfd_elf64_aarch64_add_glue_sections_to_bfd): New declaration.
+        (BFD_AARCH64_SPECIAL_SYM_TYPE_MAP): New definition.
+        (BFD_AARCH64_SPECIAL_SYM_TYPE_TAG): New definition.
+        (BFD_AARCH64_SPECIAL_SYM_TYPE_OTHER): New definition.
+        (BFD_AARCH64_SPECIAL_SYM_TYPE_ANY): New definition.
+        (bfd_is_aarch64_special_symbol_name): New declaration.
+        (bfd_aarch64_merge_machines): New declaration.
+        (bfd_aarch64_update_notes): New declaration.
+        (bfd_aarch64_get_mach_from_notes): New declaration.
+        (elf64_aarch64_setup_section_lists): New declaration.
+        (elf64_aarch64_next_input_section): New declaration.
+        (elf64_aarch64_size_stubs): New declaration.
+        (elf64_aarch64_build_stubs): New declaration.
+        * config.bfd: Add AArch64.
+        * configure.in: Add AArch64.
+        * configure: Regenerate.
+        * cpu-aarch64.c: New file.
+        * elf-bfd.h: Add AArch64.
+        * bfd-in2.h: Regenerate.
+        * libbfd.h: Regenerate.
+        * elf64-aarch64.c: New file.
+        * reloc.c: Add AArch64 relocations.
+        * targets.c: Add AArch64.
+        * po/SRC-POTFILES.in: Regenerate.
+
 2012-08-13  Maciej W. Rozycki

        * elfxx-mips.c (mips_elf_calculate_relocation): Fix the handling
diff --git a/bfd/Makefile.am b/bfd/Makefile.am
index b7271cc886f..49f9662272f 100644
--- a/bfd/Makefile.am
+++ b/bfd/Makefile.am
@@ -71,6 +71,7 @@ BFD64_LIBS_CFILES = archive64.c
 # This list is alphabetized to make it easier to keep in sync
 # with the decls and initializer in archures.c.
 ALL_MACHINES = \
+ cpu-aarch64.lo \
 cpu-alpha.lo \
 cpu-arc.lo \
 cpu-arm.lo \
@@ -151,6 +152,7 @@ ALL_MACHINES = \
 cpu-z8k.lo

 ALL_MACHINES_CFILES = \
+ cpu-aarch64.c \
 cpu-alpha.c \
 cpu-arc.c \
 cpu-arm.c \
@@ -613,6 +615,7 @@ BFD32_BACKENDS_CFILES = \
 # elf32-ia64.c requires a 64-bit bfd_vma, and hence can not be put in
 # BFD32_BACKENDS.
 BFD64_BACKENDS = \
+ elf64-aarch64.lo \
 aix5ppc-core.lo \
 aout64.lo \
 coff-alpha.lo \
@@ -651,6 +654,7 @@ BFD64_BACKENDS = \
 vms-alpha.lo

 BFD64_BACKENDS_CFILES = \
+ elf64-aarch64.c \
 aix5ppc-core.c \
 aout64.c \
 coff-alpha.c \
diff --git a/bfd/Makefile.in b/bfd/Makefile.in
index 8ee668117ae..5718fab60c3 100644
--- a/bfd/Makefile.in
+++ b/bfd/Makefile.in
@@ -370,6 +370,7 @@ BFD64_LIBS_CFILES = archive64.c
 # This list is alphabetized to make it easier to keep in sync
 # with the decls and initializer in archures.c.
 ALL_MACHINES = \
+ cpu-aarch64.lo \
 cpu-alpha.lo \
 cpu-arc.lo \
 cpu-arm.lo \
@@ -450,6 +451,7 @@ ALL_MACHINES = \
 cpu-z8k.lo

 ALL_MACHINES_CFILES = \
+ cpu-aarch64.c \
 cpu-alpha.c \
 cpu-arc.c \
 cpu-arm.c \
@@ -914,6 +916,7 @@ BFD32_BACKENDS_CFILES = \
 # elf32-ia64.c requires a 64-bit bfd_vma, and hence can not be put in
 # BFD32_BACKENDS.
BFD64_BACKENDS = \ + elf64-aarch64.lo \ aix5ppc-core.lo \ aout64.lo \ coff-alpha.lo \ @@ -952,6 +955,7 @@ BFD64_BACKENDS = \ vms-alpha.lo BFD64_BACKENDS_CFILES = \ + elf64-aarch64.c \ aix5ppc-core.c \ aout64.c \ coff-alpha.c \ @@ -1270,6 +1274,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cofflink.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compress.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/corefile.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-aarch64.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-alpha.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-arc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-arm.Plo@am__quote@ @@ -1427,6 +1432,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf32-xstormy16.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf32-xtensa.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf32.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-aarch64.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-alpha.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-gen.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-hppa.Plo@am__quote@ diff --git a/bfd/archures.c b/bfd/archures.c index a23534b346a..3198436af0a 100644 --- a/bfd/archures.c +++ b/bfd/archures.c @@ -461,6 +461,8 @@ DESCRIPTION .#define bfd_mach_tilepro 1 .#define bfd_mach_tilegx 1 .#define bfd_mach_tilegx32 2 +. bfd_arch_aarch64, {* AArch64 *} +.#define bfd_mach_aarch64 0 . bfd_arch_last . }; */ @@ -505,6 +507,7 @@ DESCRIPTION . */ +extern const bfd_arch_info_type bfd_aarch64_arch; extern const bfd_arch_info_type bfd_alpha_arch; extern const bfd_arch_info_type bfd_arc_arch; extern const bfd_arch_info_type bfd_arm_arch; @@ -590,6 +593,7 @@ static const bfd_arch_info_type * const bfd_archures_list[] = #ifdef SELECT_ARCHITECTURES SELECT_ARCHITECTURES, #else + &bfd_aarch64_arch, &bfd_alpha_arch, &bfd_arc_arch, &bfd_arm_arch, diff --git a/bfd/bfd-in.h b/bfd/bfd-in.h index 5300b14dfce..a8c2db1daab 100644 --- a/bfd/bfd-in.h +++ b/bfd/bfd-in.h @@ -928,6 +928,32 @@ extern unsigned int _bfd_elf_ppc_at_tls_transform extern unsigned int _bfd_elf_ppc_at_tprel_transform (unsigned int, unsigned int); +extern void bfd_elf64_aarch64_init_maps + (bfd *); + +void bfd_elf64_aarch64_set_options + (bfd *, struct bfd_link_info *, int, int, int); + +/* ELF AArch64 mapping symbol support. */ +#define BFD_AARCH64_SPECIAL_SYM_TYPE_MAP (1 << 0) +#define BFD_AARCH64_SPECIAL_SYM_TYPE_TAG (1 << 1) +#define BFD_AARCH64_SPECIAL_SYM_TYPE_OTHER (1 << 2) +#define BFD_AARCH64_SPECIAL_SYM_TYPE_ANY (~0) +extern bfd_boolean bfd_is_aarch64_special_symbol_name + (const char * name, int type); + +/* AArch64 stub generation support. Called from the linker. */ +extern int elf64_aarch64_setup_section_lists + (bfd *, struct bfd_link_info *); +extern void elf64_aarch64_next_input_section + (struct bfd_link_info *, struct bfd_section *); +extern bfd_boolean elf64_aarch64_size_stubs + (bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma, + struct bfd_section * (*) (const char *, struct bfd_section *), + void (*) (void)); +extern bfd_boolean elf64_aarch64_build_stubs + (struct bfd_link_info *); + /* TI COFF load page support. 
 */
 extern void bfd_ticoff_set_section_load_page
   (struct bfd_section *, int);
diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h
index c7ce4cc67a6..e496083d6eb 100644
--- a/bfd/bfd-in2.h
+++ b/bfd/bfd-in2.h
@@ -935,6 +935,32 @@ extern unsigned int _bfd_elf_ppc_at_tls_transform
 extern unsigned int _bfd_elf_ppc_at_tprel_transform
   (unsigned int, unsigned int);

+extern void bfd_elf64_aarch64_init_maps
+  (bfd *);
+
+void bfd_elf64_aarch64_set_options
+  (bfd *, struct bfd_link_info *, int, int, int);
+
+/* ELF AArch64 mapping symbol support.  */
+#define BFD_AARCH64_SPECIAL_SYM_TYPE_MAP   (1 << 0)
+#define BFD_AARCH64_SPECIAL_SYM_TYPE_TAG   (1 << 1)
+#define BFD_AARCH64_SPECIAL_SYM_TYPE_OTHER (1 << 2)
+#define BFD_AARCH64_SPECIAL_SYM_TYPE_ANY   (~0)
+extern bfd_boolean bfd_is_aarch64_special_symbol_name
+  (const char * name, int type);
+
+/* AArch64 stub generation support.  Called from the linker.  */
+extern int elf64_aarch64_setup_section_lists
+  (bfd *, struct bfd_link_info *);
+extern void elf64_aarch64_next_input_section
+  (struct bfd_link_info *, struct bfd_section *);
+extern bfd_boolean elf64_aarch64_size_stubs
+  (bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma,
+   struct bfd_section * (*) (const char *, struct bfd_section *),
+   void (*) (void));
+extern bfd_boolean elf64_aarch64_build_stubs
+  (struct bfd_link_info *);
+
 /* TI COFF load page support.  */
 extern void bfd_ticoff_set_section_load_page
   (struct bfd_section *, int);
@@ -2164,6 +2190,8 @@ enum bfd_architecture
 #define bfd_mach_tilepro 1
 #define bfd_mach_tilegx 1
 #define bfd_mach_tilegx32 2
+  bfd_arch_aarch64,   /* AArch64 */
+#define bfd_mach_aarch64 0
   bfd_arch_last
 };
@@ -5040,6 +5068,220 @@ value in a word. The relocation is relative offset from */ the dynamic object into the runtime process image. */ BFD_RELOC_MICROBLAZE_COPY,
+/* AArch64 ADD immediate instruction, holding bits 0 to 11 of the address.
+Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */
+  BFD_RELOC_AARCH64_ADD_LO12,
+
+/* Get to the page base of the global offset table entry for a symbol as
+part of an ADRP instruction using a 21 bit PC relative value.  Used in
+conjunction with BFD_RELOC_AARCH64_LD64_GOT_LO12_NC.  */
+  BFD_RELOC_AARCH64_ADR_GOT_PAGE,
+
+/* AArch64 ADRP instruction, with bits 12 to 32 of a pc-relative page
+offset, giving a 4KB aligned page base address.  */
+  BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+
+/* AArch64 ADRP instruction, with bits 12 to 32 of a pc-relative page
+offset, giving a 4KB aligned page base address, but with no overflow
+checking.  */
+  BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
+
+/* AArch64 ADR instruction, holding a simple 21 bit pc-relative byte offset.  */
+  BFD_RELOC_AARCH64_ADR_LO21_PCREL,
+
+/* AArch64 19 bit pc-relative conditional branch and compare & branch.
+The lowest two bits must be zero and are not stored in the instruction,
+giving a 21 bit signed byte offset.  */
+  BFD_RELOC_AARCH64_BRANCH19,
+
+/* AArch64 26 bit pc-relative unconditional branch and link.
+The lowest two bits must be zero and are not stored in the instruction,
+giving a 28 bit signed byte offset.  */
+  BFD_RELOC_AARCH64_CALL26,
+
+/* AArch64 pseudo relocation code to be used internally by the AArch64
+assembler and not (currently) written to any object files.  */
+  BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP,
+
+/* AArch64 26 bit pc-relative unconditional branch.
+The lowest two bits must be zero and are not stored in the instruction,
+giving a 28 bit signed byte offset.  */
+  BFD_RELOC_AARCH64_JUMP26,
+
+/* AArch64 Load Literal instruction, holding a 19 bit pc-relative word
+offset.  The lowest two bits must be zero and are not stored in the
+instruction, giving a 21 bit signed byte offset.  */
+  BFD_RELOC_AARCH64_LD_LO19_PCREL,
+
+/* Unsigned 12 bit byte offset for 64 bit load/store from the page of
+the GOT entry for this symbol.  Used in conjunction with
+BFD_RELOC_AARCH64_ADR_GOT_PAGE.  */
+  BFD_RELOC_AARCH64_LD64_GOT_LO12_NC,
+
+/* AArch64 unspecified load/store instruction, holding bits 0 to 11 of the
+address.  Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */
+  BFD_RELOC_AARCH64_LDST_LO12,
+
+/* AArch64 8-bit load/store instruction, holding bits 0 to 11 of the
+address.  Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */
+  BFD_RELOC_AARCH64_LDST8_LO12,
+
+/* AArch64 16-bit load/store instruction, holding bits 0 to 11 of the
+address.  Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */
+  BFD_RELOC_AARCH64_LDST16_LO12,
+
+/* AArch64 32-bit load/store instruction, holding bits 0 to 11 of the
+address.  Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */
+  BFD_RELOC_AARCH64_LDST32_LO12,
+
+/* AArch64 64-bit load/store instruction, holding bits 0 to 11 of the
+address.  Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */
+  BFD_RELOC_AARCH64_LDST64_LO12,
+
+/* AArch64 128-bit load/store instruction, holding bits 0 to 11 of the
+address.  Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL.  */
+  BFD_RELOC_AARCH64_LDST128_LO12,
+
+/* AArch64 MOV[NZK] instruction with most significant bits 0 to 15
+of an unsigned address/value.  */
+  BFD_RELOC_AARCH64_MOVW_G0,
+
+/* AArch64 MOV[NZ] instruction with most significant bits 0 to 15
+of a signed value.  Changes instruction to MOVZ or MOVN depending on the
+value's sign.  */
+  BFD_RELOC_AARCH64_MOVW_G0_S,
+
+/* AArch64 MOV[NZK] instruction with less significant bits 0 to 15 of
+an address/value.  No overflow checking.  */
+  BFD_RELOC_AARCH64_MOVW_G0_NC,
+
+/* AArch64 MOV[NZK] instruction with most significant bits 16 to 31
+of an unsigned address/value.  */
+  BFD_RELOC_AARCH64_MOVW_G1,
+
+/* AArch64 MOV[NZK] instruction with less significant bits 16 to 31
+of an address/value.  No overflow checking.  */
+  BFD_RELOC_AARCH64_MOVW_G1_NC,
+
+/* AArch64 MOV[NZ] instruction with most significant bits 16 to 31
+of a signed value.  Changes instruction to MOVZ or MOVN depending on the
+value's sign.  */
+  BFD_RELOC_AARCH64_MOVW_G1_S,
+
+/* AArch64 MOV[NZK] instruction with most significant bits 32 to 47
+of an unsigned address/value.  */
+  BFD_RELOC_AARCH64_MOVW_G2,
+
+/* AArch64 MOV[NZK] instruction with less significant bits 32 to 47
+of an address/value.  No overflow checking.  */
+  BFD_RELOC_AARCH64_MOVW_G2_NC,
+
+/* AArch64 MOV[NZ] instruction with most significant bits 32 to 47
+of a signed value.  Changes instruction to MOVZ or MOVN depending on the
+value's sign.  */
+  BFD_RELOC_AARCH64_MOVW_G2_S,
+
+/* AArch64 MOV[NZK] instruction with most significant bits 48 to 63
+of a signed or unsigned address/value.  */
+  BFD_RELOC_AARCH64_MOVW_G3,
+
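Illustration only (not part of the patch): typical instruction sequences that the load/store and MOVW relocations above annotate, written in the same fragment style used later in elf64-aarch64.c. 'sym' is a placeholder symbol, and the :lo12:/:abs_gN: operand modifiers are the GAS syntax introduced by this patch's assembler port:

   adrp x0, sym                    BFD_RELOC_AARCH64_ADR_HI21_PCREL (sym)
   add  x0, x0, #:lo12:sym         BFD_RELOC_AARCH64_ADD_LO12 (sym)
   ldr  x1, [x0, #:lo12:sym]       BFD_RELOC_AARCH64_LDST64_LO12 (sym)

   movz x0, #:abs_g3:sym           BFD_RELOC_AARCH64_MOVW_G3 (sym)
   movk x0, #:abs_g2_nc:sym        BFD_RELOC_AARCH64_MOVW_G2_NC (sym)
   movk x0, #:abs_g1_nc:sym        BFD_RELOC_AARCH64_MOVW_G1_NC (sym)
   movk x0, #:abs_g0_nc:sym        BFD_RELOC_AARCH64_MOVW_G0_NC (sym)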
+/* AArch64 TLS relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_ADD,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_CALL,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_LDR,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
+
+/* AArch64 TLS DESC relocation.  */
+  BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
+
+/* Unsigned 12 bit byte offset to global offset table entry for a symbol's
+tls_index structure.  Used in conjunction with
+BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21.  */
+  BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
+
+/* Get to the page base of the global offset table entry for a symbol's
+tls_index structure as part of an adrp instruction using a 21 bit PC
+relative value.  Used in conjunction with
+BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC.  */
+  BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
+
+/* AArch64 TLS INITIAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
+
+/* AArch64 TLS INITIAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
+
+/* AArch64 TLS INITIAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
+
+/* AArch64 TLS INITIAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
+
+/* AArch64 TLS INITIAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
+
+/* AArch64 TLS LOCAL EXEC relocation.  */
+  BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
+
+/* AArch64 TLS relocation.  */
+  BFD_RELOC_AARCH64_TLS_DTPMOD64,
+
+/* AArch64 TLS relocation.  */
+  BFD_RELOC_AARCH64_TLS_DTPREL64,
+
+/* AArch64 TLS relocation.  */
+  BFD_RELOC_AARCH64_TLS_TPREL64,
+
+/* AArch64 14 bit pc-relative test bit and branch.
+The lowest two bits must be zero and are not stored in the instruction,
+giving a 16 bit signed byte offset.  */
+  BFD_RELOC_AARCH64_TSTBR14,
+
 /* Tilera TILEPro Relocations.
*/ BFD_RELOC_TILEPRO_COPY, BFD_RELOC_TILEPRO_GLOB_DAT, diff --git a/bfd/config.bfd b/bfd/config.bfd index 783d1f3065f..4b9ee4ac14a 100644 --- a/bfd/config.bfd +++ b/bfd/config.bfd @@ -69,6 +69,7 @@ esac targ_cpu=`echo $targ | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` case "${targ_cpu}" in +aarch64*) targ_archs="bfd_aarch64_arch bfd_arm_arch";; alpha*) targ_archs=bfd_alpha_arch ;; am34*|am33_2.0*) targ_archs=bfd_mn10300_arch ;; arm*) targ_archs=bfd_arm_arch ;; @@ -143,6 +144,26 @@ case "${targ}" in # START OF targmatch.h #ifdef BFD64 + aarch64-*-elf) + targ_defvec=bfd_elf64_littleaarch64_vec + targ_selvecs="bfd_elf64_bigaarch64_vec bfd_elf32_littlearm_vec bfd_elf32_bigarm_vec" + want64=true + ;; + aarch64_be-*-elf) + targ_defvec=bfd_elf64_bigaarch64_vec + targ_selvecs="bfd_elf64_littleaarch64_vec bfd_elf32_bigarm_vec bfd_elf32_littlearm_vec" + want64=true + ;; + aarch64-*-linux*) + targ_defvec=bfd_elf64_littleaarch64_vec + targ_selvecs="bfd_elf64_bigaarch64_vec bfd_elf32_littlearm_vec bfd_elf32_bigarm_vec" + want64=true + ;; + aarch64_be-*-linux*) + targ_defvec=bfd_elf64_bigaarch64_vec + targ_selvecs="bfd_elf64_littleaarch64_vec bfd_elf32_bigarm_vec bfd_elf32_littlearm_vec" + want64=true + ;; alpha*-*-freebsd* | alpha*-*-kfreebsd*-gnu) targ_defvec=bfd_elf64_alpha_freebsd_vec targ_selvecs="bfd_elf64_alpha_vec ecoffalpha_little_vec" diff --git a/bfd/configure b/bfd/configure index 63f34ab9246..1c3cbd3dd0b 100755 --- a/bfd/configure +++ b/bfd/configure @@ -15342,6 +15342,7 @@ do bfd_elf32_xtensa_be_vec) tb="$tb xtensa-isa.lo xtensa-modules.lo elf32-xtensa.lo elf32.lo $elf" ;; bfd_elf64_alpha_freebsd_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_alpha_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;; + bfd_elf64_bigaarch64_vec) tb="$tb elf64-aarch64.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_big_generic_vec) tb="$tb elf64-gen.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_bigmips_vec) tb="$tb elf64-mips.lo elf64.lo elfxx-mips.lo elf-vxworks.lo elf32.lo $elf ecofflink.lo"; target_size=64 ;; bfd_elf64_hppa_linux_vec) tb="$tb elf64-hppa.lo elf64.lo $elf"; target_size=64 ;; @@ -15350,6 +15351,7 @@ do bfd_elf64_ia64_hpux_big_vec) tb="$tb elf64-ia64.lo elfxx-ia64.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_ia64_little_vec) tb="$tb elf64-ia64.lo elfxx-ia64.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_ia64_vms_vec) tb="$tb elf64-ia64-vms.lo elf64-ia64.lo elfxx-ia64.lo elf64.lo vms-lib.lo vms-misc.lo $elf"; target_size=64 ;; + bfd_elf64_littleaarch64_vec)tb="$tb elf64-aarch64.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_little_generic_vec) tb="$tb elf64-gen.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_littlemips_vec) tb="$tb elf64-mips.lo elf64.lo elfxx-mips.lo elf-vxworks.lo elf32.lo $elf ecofflink.lo"; target_size=64 ;; bfd_elf64_mmix_vec) tb="$tb elf64-mmix.lo elf64.lo $elf" target_size=64 ;; diff --git a/bfd/configure.in b/bfd/configure.in index 97fff619292..36261737a70 100644 --- a/bfd/configure.in +++ b/bfd/configure.in @@ -832,6 +832,7 @@ do bfd_elf32_xtensa_be_vec) tb="$tb xtensa-isa.lo xtensa-modules.lo elf32-xtensa.lo elf32.lo $elf" ;; bfd_elf64_alpha_freebsd_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_alpha_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;; + bfd_elf64_bigaarch64_vec) tb="$tb elf64-aarch64.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_big_generic_vec) tb="$tb elf64-gen.lo elf64.lo $elf"; target_size=64 ;; bfd_elf64_bigmips_vec) tb="$tb elf64-mips.lo elf64.lo elfxx-mips.lo elf-vxworks.lo 
elf32.lo $elf ecofflink.lo"; target_size=64 ;;
 bfd_elf64_hppa_linux_vec) tb="$tb elf64-hppa.lo elf64.lo $elf"; target_size=64 ;;
@@ -840,6 +841,7 @@ do
 bfd_elf64_ia64_hpux_big_vec) tb="$tb elf64-ia64.lo elfxx-ia64.lo elf64.lo $elf"; target_size=64 ;;
 bfd_elf64_ia64_little_vec) tb="$tb elf64-ia64.lo elfxx-ia64.lo elf64.lo $elf"; target_size=64 ;;
 bfd_elf64_ia64_vms_vec) tb="$tb elf64-ia64-vms.lo elf64-ia64.lo elfxx-ia64.lo elf64.lo vms-lib.lo vms-misc.lo $elf"; target_size=64 ;;
+ bfd_elf64_littleaarch64_vec)tb="$tb elf64-aarch64.lo elf64.lo $elf"; target_size=64 ;;
 bfd_elf64_little_generic_vec) tb="$tb elf64-gen.lo elf64.lo $elf"; target_size=64 ;;
 bfd_elf64_littlemips_vec) tb="$tb elf64-mips.lo elf64.lo elfxx-mips.lo elf-vxworks.lo elf32.lo $elf ecofflink.lo"; target_size=64 ;;
 bfd_elf64_mmix_vec) tb="$tb elf64-mmix.lo elf64.lo $elf" target_size=64 ;;
diff --git a/bfd/cpu-aarch64.c b/bfd/cpu-aarch64.c
new file mode 100644
index 00000000000..81620371ffe
--- /dev/null
+++ b/bfd/cpu-aarch64.c
@@ -0,0 +1,121 @@
+/* BFD support for AArch64.
+   Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of BFD, the Binary File Descriptor library.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; see the file COPYING3.  If not,
+   see <http://www.gnu.org/licenses/>.  */
+
+#include "sysdep.h"
+#include "bfd.h"
+#include "libbfd.h"
+#include "libiberty.h"
+
+/* This routine is provided two arch_infos and works out which AArch64
+   machine would be compatible with both and returns a pointer
+   to its info structure.  */
+
+static const bfd_arch_info_type *
+compatible (const bfd_arch_info_type * a, const bfd_arch_info_type * b)
+{
+  /* If a & b are for different architectures we can do nothing.  */
+  if (a->arch != b->arch)
+    return NULL;
+
+  /* If a & b are for the same machine then all is well.  */
+  if (a->mach == b->mach)
+    return a;
+
+  /* Otherwise if either a or b is the 'default' machine
+     then it can be polymorphed into the other.  */
+  if (a->the_default)
+    return b;
+
+  if (b->the_default)
+    return a;
+
+  /* So far all newer cores are
+     supersets of previous cores.  */
+  if (a->mach < b->mach)
+    return b;
+  else if (a->mach > b->mach)
+    return a;
+
+  /* Never reached!  */
+  return NULL;
+}
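A quick illustration of the rules above (a sketch, not part of the patch; 'other' stands for any second AArch64 arch_info entry):

   /* Sketch only: bfd_aarch64_arch below is the default machine, so a
      default entry polymorphs into the more specific one; NULL results
      only from mismatched architectures.  */
   const bfd_arch_info_type *merged = compatible (&bfd_aarch64_arch, other);
   /* merged == other when other->arch == bfd_arch_aarch64;
      merged == NULL otherwise.  */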
+
+static struct
+{
+  unsigned int mach;
+  char *name;
+}
+processors[] =
+{
+  /* These two are example CPUs supported in GCC; once we have real
+     CPUs they will be removed.  */
+  { bfd_mach_aarch64, "example-1" },
+  { bfd_mach_aarch64, "example-2" }
+};
+
+static bfd_boolean
+scan (const struct bfd_arch_info *info, const char *string)
+{
+  int i;
+
+  /* First test for an exact match.  */
+  if (strcasecmp (string, info->printable_name) == 0)
+    return TRUE;
+
+  /* Next check for a processor name instead of an architecture name.  */
+  for (i = sizeof (processors) / sizeof (processors[0]); i--;)
+    {
+      if (strcasecmp (string, processors[i].name) == 0)
+        break;
+    }
+
+  if (i != -1 && info->mach == processors[i].mach)
+    return TRUE;
+
+  /* Finally check for the default architecture.  */
+  if (strcasecmp (string, "aarch64") == 0)
+    return info->the_default;
+
+  return FALSE;
+}
+
+#define N(NUMBER, PRINT, DEFAULT, NEXT) \
+  { 64, 64, 8, bfd_arch_aarch64, NUMBER, \
+    "aarch64", PRINT, 4, DEFAULT, compatible, scan, \
+    bfd_arch_default_fill, NEXT }
+
+const bfd_arch_info_type bfd_aarch64_arch =
+  N (0, "aarch64", TRUE, NULL);
+
+
+bfd_boolean
+bfd_is_aarch64_special_symbol_name (const char *name, int type)
+{
+  if (!name || name[0] != '$')
+    return FALSE;
+  if (name[1] == 'x' || name[1] == 'd')
+    type &= BFD_AARCH64_SPECIAL_SYM_TYPE_MAP;
+  else if (name[1] == 'm' || name[1] == 'f' || name[1] == 'p')
+    type &= BFD_AARCH64_SPECIAL_SYM_TYPE_TAG;
+  else
+    return FALSE;
+
+  return (type != 0 && (name[2] == 0 || name[2] == '.'));
+}
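For illustration (not part of the patch), a few calls showing how the classification above treats AArch64 mapping symbols, where $x marks the start of A64 code and $d the start of data:

   bfd_is_aarch64_special_symbol_name ("$x", BFD_AARCH64_SPECIAL_SYM_TYPE_MAP);   /* TRUE: code marker.  */
   bfd_is_aarch64_special_symbol_name ("$d.0", BFD_AARCH64_SPECIAL_SYM_TYPE_MAP); /* TRUE: data marker.  */
   bfd_is_aarch64_special_symbol_name ("$x", BFD_AARCH64_SPECIAL_SYM_TYPE_TAG);   /* FALSE: wrong type.  */
   bfd_is_aarch64_special_symbol_name ("$y", BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);   /* FALSE: not a special prefix.  */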
diff --git a/bfd/doc/Makefile.in b/bfd/doc/Makefile.in
index 68e17f72d8e..7ba351d742b 100644
--- a/bfd/doc/Makefile.in
+++ b/bfd/doc/Makefile.in
@@ -43,7 +43,8 @@ subdir = doc
 DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
 $(bfd_TEXINFOS)
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
+am__aclocal_m4_deps = $(top_srcdir)/../bfd/bfd.m4 \
+ $(top_srcdir)/../bfd/warning.m4 $(top_srcdir)/../config/acx.m4 \
 $(top_srcdir)/../config/depstand.m4 \
 $(top_srcdir)/../config/gettext-sister.m4 \
 $(top_srcdir)/../config/largefile.m4 \
@@ -56,7 +57,6 @@ am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
 $(top_srcdir)/../config/stdint.m4 $(top_srcdir)/../libtool.m4 \
 $(top_srcdir)/../ltoptions.m4 $(top_srcdir)/../ltsugar.m4 \
 $(top_srcdir)/../ltversion.m4 $(top_srcdir)/../lt~obsolete.m4 \
- $(top_srcdir)/bfd.m4 $(top_srcdir)/warning.m4 \
 $(top_srcdir)/acinclude.m4 $(top_srcdir)/../config/zlib.m4 \
 $(top_srcdir)/configure.in
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
diff --git a/bfd/elf-bfd.h b/bfd/elf-bfd.h
index ff0e615cfdc..6a44912fd48 100644
--- a/bfd/elf-bfd.h
+++ b/bfd/elf-bfd.h
@@ -402,7 +402,8 @@ struct eh_frame_hdr_info
    one line.  */
 enum elf_target_id
 {
-  ALPHA_ELF_DATA = 1,
+  AARCH64_ELF_DATA = 1,
+  ALPHA_ELF_DATA,
   ARM_ELF_DATA,
   AVR_ELF_DATA,
   BFIN_ELF_DATA,
diff --git a/bfd/elf64-aarch64.c b/bfd/elf64-aarch64.c
new file mode 100644
index 00000000000..629907403c1
--- /dev/null
+++ b/bfd/elf64-aarch64.c
@@ -0,0 +1,7016 @@
+/* ELF support for AArch64.
+   Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+   Contributed by ARM Ltd.
+
+   This file is part of BFD, the Binary File Descriptor library.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; see the file COPYING3.  If not,
+   see <http://www.gnu.org/licenses/>.  */
+
+/* Notes on implementation:
+
+  Thread Local Storage (TLS)
+
+  Overview:
+
+  The implementation currently supports both traditional TLS and TLS
+  descriptors, but only general dynamic (GD).
+
+  For traditional TLS the assembler will present us with code
+  fragments of the form:
+
+  adrp x0, :tlsgd:foo
+                           R_AARCH64_TLSGD_ADR_PAGE21(foo)
+  add  x0, :tlsgd_lo12:foo
+                           R_AARCH64_TLSGD_ADD_LO12_NC(foo)
+  bl   __tls_get_addr
+  nop
+
+  For TLS descriptors the assembler will present us with code
+  fragments of the form:
+
+  adrp x0, :tlsdesc:foo                  R_AARCH64_TLSDESC_ADR_PAGE(foo)
+  ldr  x1, [x0, #:tlsdesc_lo12:foo]      R_AARCH64_TLSDESC_LD64_LO12(foo)
+  add  x0, x0, #:tlsdesc_lo12:foo        R_AARCH64_TLSDESC_ADD_LO12(foo)
+  .tlsdesccall foo
+  blr  x1                                R_AARCH64_TLSDESC_CALL(foo)
+
+  The relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} against foo
+  indicate that foo is thread local and should be accessed via the
+  traditional TLS mechanisms.
+
+  The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
+  against foo indicate that 'foo' is thread local and should be accessed
+  via a TLS descriptor mechanism.
+
+  The precise instruction sequence is only relevant from the
+  perspective of linker relaxation which is currently not implemented.
+
+  The static linker must detect that 'foo' is a TLS object and
+  allocate a double GOT entry.  The GOT entry must be created for both
+  global and local TLS symbols.  Note that this is different from
+  non-TLS local objects, which do not need a GOT entry.
+
+  In the traditional TLS mechanism, the double GOT entry is used to
+  provide the tls_index structure, containing module and offset
+  entries.  The static linker places the relocation R_AARCH64_TLS_DTPMOD64
+  on the module entry.  The loader will subsequently fixup this
+  relocation with the module identity.
+
+  For global traditional TLS symbols the static linker places an
+  R_AARCH64_TLS_DTPREL64 relocation on the offset entry.  The loader
+  will subsequently fixup the offset.  For local TLS symbols the static
+  linker fixes up the offset itself.
+
+  In the TLS descriptor mechanism the double GOT entry is used to
+  provide the descriptor.  The static linker places the relocation
+  R_AARCH64_TLSDESC on the first GOT slot.  The loader will
+  subsequently fix this up.
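The double GOT entry described above can be pictured as follows (a sketch, not part of the patch; the field names are illustrative, mirroring the usual tls_index layout handed to __tls_get_addr):

   typedef struct
   {
     bfd_vma module;  /* Fixed up by the loader via R_AARCH64_TLS_DTPMOD64.  */
     bfd_vma offset;  /* Fixed up via R_AARCH64_TLS_DTPREL64 for global
                         symbols; filled in directly by the static linker
                         for local symbols.  */
   } tls_index_sketch;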
+
+  elf64_aarch64_size_dynamic_sections ()
+
+  Iterate over all input BFDs, looking in the local symbol data
+  structures constructed earlier for local TLS symbols, and allocate
+  them double GOT slots along with space for a single GOT relocation.
+  Update the local symbol structure to record the GOT offset
+  allocated.
+
+  elf64_aarch64_relocate_section ()
+
+  Calls elf64_aarch64_final_link_relocate ()
+
+  Emit the relevant TLS relocations against the GOT for each TLS
+  symbol.  For local TLS symbols emit the GOT offset directly.  The
+  GOT relocations are emitted only once, the first time a TLS symbol
+  is encountered.  The implementation uses the LSB of the GOT offset
+  to flag that the relevant GOT relocations for a symbol have been
+  emitted.  All of the TLS code that uses the GOT offset needs to take
+  care to mask out this flag bit before using the offset.
+
+  elf64_aarch64_final_link_relocate ()
+
+  Fix up the R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} relocations.  */
+
+#include "sysdep.h"
+#include "bfd.h"
+#include "libiberty.h"
+#include "libbfd.h"
+#include "bfd_stdint.h"
+#include "elf-bfd.h"
+#include "bfdlink.h"
+#include "elf/aarch64.h"
+
+static bfd_reloc_status_type
+bfd_elf_aarch64_put_addend (bfd *abfd,
+			    bfd_byte *address,
+			    reloc_howto_type *howto, bfd_signed_vma addend);
+
+#define IS_AARCH64_TLS_RELOC(R_TYPE)			\
+  ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21		\
+   || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC		\
+   || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1	\
+   || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC	\
+   || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21	\
+   || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC	\
+   || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19	\
+   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12	\
+   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12	\
+   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC	\
+   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2		\
+   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1		\
+   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC	\
+   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0		\
+   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC	\
+   || (R_TYPE) == R_AARCH64_TLS_DTPMOD64		\
+   || (R_TYPE) == R_AARCH64_TLS_DTPREL64		\
+   || (R_TYPE) == R_AARCH64_TLS_TPREL64			\
+   || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
+
+#define IS_AARCH64_TLSDESC_RELOC(R_TYPE)		\
+  ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19		\
+   || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21		\
+   || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE		\
+   || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC		\
+   || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC	\
+   || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1		\
+   || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC		\
+   || (R_TYPE) == R_AARCH64_TLSDESC_LDR			\
+   || (R_TYPE) == R_AARCH64_TLSDESC_ADD			\
+   || (R_TYPE) == R_AARCH64_TLSDESC_CALL		\
+   || (R_TYPE) == R_AARCH64_TLSDESC)
+
+#define ELIMINATE_COPY_RELOCS 0
+
+/* Return the relocation section associated with NAME.  HTAB is the
+   bfd's elf64_aarch64_link_hash_table.  */
+#define RELOC_SECTION(HTAB, NAME) \
+  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
+
+/* Return size of a relocation entry.  HTAB is the bfd's
+   elf64_aarch64_link_hash_table.  */
+#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
+
+/* Return function to swap relocations in.  HTAB is the bfd's
+   elf64_aarch64_link_hash_table.  */
+#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
+
+/* Return function to swap relocations out.  HTAB is the bfd's
+   elf64_aarch64_link_hash_table.  */
+#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
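+
+/* A minimal sketch of the GOT offset convention described in the
+   notes above, assuming the generic got.offset field of an ELF link
+   hash entry: the LSB flags that the GOT relocations for the symbol
+   have already been emitted, and every consumer masks it off before
+   using the offset.
+
+     off = h->got.offset;
+     if ((off & 1) != 0)
+       off &= ~(bfd_vma) 1;
+     else
+       h->got.offset |= 1;
+*/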
+
+/* GOT Entry size - 8 bytes.  */
+#define GOT_ENTRY_SIZE			(8)
+#define PLT_ENTRY_SIZE			(32)
+#define PLT_SMALL_ENTRY_SIZE		(16)
+#define PLT_TLSDESC_ENTRY_SIZE		(32)
+
+/* Take the PAGE component of an address or offset.  */
+#define PG(x) ((x) & ~ 0xfff)
+#define PG_OFFSET(x) ((x) & 0xfff)
+
+/* Encoding of the nop instruction.  */
+#define INSN_NOP 0xd503201f
+
+#define aarch64_compute_jump_table_size(htab)		\
+  (((htab)->root.srelplt == NULL) ? 0			\
+   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
+
+/* If the distance between the PLTGOT and the PLT is < 4GB, these PLT
+   entries are used and the first entry in the procedure linkage table
+   looks like this.  Note that the dynamic linker gets &PLTGOT[2]
+   in x16 and needs to work out PLTGOT[1] by using an address of
+   [x16,#-8].  */
+static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
+{
+  0xf0, 0x7b, 0xbf, 0xa9,	/* stp x16, x30, [sp, #-16]!  */
+  0x10, 0x00, 0x00, 0x90,	/* adrp x16, (GOT+16)  */
+  0x11, 0x0A, 0x40, 0xf9,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
+  0x10, 0x42, 0x00, 0x91,	/* add x16, x16,#PLT_GOT+0x10   */
+  0x20, 0x02, 0x1f, 0xd6,	/* br x17  */
+  0x1f, 0x20, 0x03, 0xd5,	/* nop */
+  0x1f, 0x20, 0x03, 0xd5,	/* nop */
+  0x1f, 0x20, 0x03, 0xd5,	/* nop */
+};
+
+/* If the distance between the PLTGOT and the PLT is < 4GB, each
+   per-function entry in the procedure linkage table looks like
+   this.  */
+static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
+{
+  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
+  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8] */
+  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
+  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
+};
+
+static const bfd_byte
+elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
+{
+  0xe2, 0x0f, 0xbf, 0xa9,	/* stp x2, x3, [sp, #-16]!  */
+  0x02, 0x00, 0x00, 0x90,	/* adrp x2, 0  */
+  0x03, 0x00, 0x00, 0x90,	/* adrp x3, 0  */
+  0x42, 0x08, 0x40, 0xF9,	/* ldr x2, [x2, #0]  */
+  0x63, 0x00, 0x00, 0x91,	/* add x3, x3, 0  */
+  0x40, 0x00, 0x1F, 0xD6,	/* br x2  */
+  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
+  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
+};
+
+#define elf_info_to_howto		elf64_aarch64_info_to_howto
+#define elf_info_to_howto_rel		elf64_aarch64_info_to_howto
+
+#define AARCH64_ELF_ABI_VERSION		0
+#define AARCH64_ELF_OS_ABI_VERSION	0
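+
+/* For example, the R_AARCH64_ADR_PREL_PG_HI21 howto below computes
+   ((PG (S + A) - PG (P)) >> 12), i.e. the page difference between the
+   symbol (plus addend) and the place, scaled into the 21-bit signed
+   immediate of an ADRP instruction.  */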
+
+/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
+#define ALL_ONES (~ (bfd_vma) 0)
+
+static reloc_howto_type elf64_aarch64_howto_none =
+  HOWTO (R_AARCH64_NONE,	/* type */
+	 0,			/* rightshift */
+	 0,			/* size (0 = byte, 1 = short, 2 = long) */
+	 0,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_dont,/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_NONE",	/* name */
+	 FALSE,			/* partial_inplace */
+	 0,			/* src_mask */
+	 0,			/* dst_mask */
+	 FALSE);		/* pcrel_offset */
+
+static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
+{
+  HOWTO (R_AARCH64_COPY,	/* type */
+	 0,			/* rightshift */
+	 2,			/* size (0 = byte, 1 = short, 2 = long) */
+	 64,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_bitfield,	/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_COPY",	/* name */
+	 TRUE,			/* partial_inplace */
+	 0xffffffff,		/* src_mask */
+	 0xffffffff,		/* dst_mask */
+	 FALSE),		/* pcrel_offset */
+
+  HOWTO (R_AARCH64_GLOB_DAT,	/* type */
+	 0,			/* rightshift */
+	 2,			/* size (0 = byte, 1 = short, 2 = long) */
+	 64,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_bitfield,	/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_GLOB_DAT",	/* name */
+	 TRUE,			/* partial_inplace */
+	 0xffffffff,		/* src_mask */
+	 0xffffffff,		/* dst_mask */
+	 FALSE),		/* pcrel_offset */
+
+  HOWTO (R_AARCH64_JUMP_SLOT,	/* type */
+	 0,			/* rightshift */
+	 2,			/* size (0 = byte, 1 = short, 2 = long) */
+	 64,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_bitfield,	/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_JUMP_SLOT",	/* name */
+	 TRUE,			/* partial_inplace */
+	 0xffffffff,		/* src_mask */
+	 0xffffffff,		/* dst_mask */
+	 FALSE),		/* pcrel_offset */
+
+  HOWTO (R_AARCH64_RELATIVE,	/* type */
+	 0,			/* rightshift */
+	 2,			/* size (0 = byte, 1 = short, 2 = long) */
+	 64,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_bitfield,	/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_RELATIVE",	/* name */
+	 TRUE,			/* partial_inplace */
+	 ALL_ONES,		/* src_mask */
+	 ALL_ONES,		/* dst_mask */
+	 FALSE),		/* pcrel_offset */
+
+  HOWTO (R_AARCH64_TLS_DTPMOD64,	/* type */
+	 0,			/* rightshift */
+	 2,			/* size (0 = byte, 1 = short, 2 = long) */
+	 64,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_dont,	/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_TLS_DTPMOD64",	/* name */
+	 FALSE,			/* partial_inplace */
+	 0,			/* src_mask */
+	 ALL_ONES,		/* dst_mask */
+	 FALSE),		/* pcrel_offset */
+
+  HOWTO (R_AARCH64_TLS_DTPREL64,	/* type */
+	 0,			/* rightshift */
+	 2,			/* size (0 = byte, 1 = short, 2 = long) */
+	 64,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_dont,	/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_TLS_DTPREL64",	/* name */
+	 FALSE,			/* partial_inplace */
+	 0,			/* src_mask */
+	 ALL_ONES,		/* dst_mask */
+	 FALSE),		/* pcrel_offset */
+
+  HOWTO (R_AARCH64_TLS_TPREL64,	/* type */
+	 0,			/* rightshift */
+	 2,			/* size (0 = byte, 1 = short, 2 = long) */
+	 64,			/* bitsize */
+	 FALSE,			/* pc_relative */
+	 0,			/* bitpos */
+	 complain_overflow_dont,	/* complain_on_overflow */
+	 bfd_elf_generic_reloc,	/* special_function */
+	 "R_AARCH64_TLS_TPREL64",	/* name */
+	 FALSE,			/* partial_inplace */
+	 0,			/* src_mask */
+	 ALL_ONES,		/* dst_mask */
+	 FALSE),		/*
pcrel_offset */ + + HOWTO (R_AARCH64_TLSDESC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 64, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC", /* name */ + FALSE, /* partial_inplace */ + 0, /* src_mask */ + ALL_ONES, /* dst_mask */ + FALSE), /* pcrel_offset */ + +}; + +/* Note: code such as elf64_aarch64_reloc_type_lookup expect to use e.g. + R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO + in that slot. */ + +static reloc_howto_type elf64_aarch64_howto_table[] = +{ + /* Basic data relocations. */ + + HOWTO (R_AARCH64_NULL, /* type */ + 0, /* rightshift */ + 0, /* size (0 = byte, 1 = short, 2 = long) */ + 0, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_NULL", /* name */ + FALSE, /* partial_inplace */ + 0, /* src_mask */ + 0, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* .xword: (S+A) */ + HOWTO (R_AARCH64_ABS64, /* type */ + 0, /* rightshift */ + 4, /* size (4 = long long) */ + 64, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ABS64", /* name */ + FALSE, /* partial_inplace */ + ALL_ONES, /* src_mask */ + ALL_ONES, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* .word: (S+A) */ + HOWTO (R_AARCH64_ABS32, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 32, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ABS32", /* name */ + FALSE, /* partial_inplace */ + 0xffffffff, /* src_mask */ + 0xffffffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* .half: (S+A) */ + HOWTO (R_AARCH64_ABS16, /* type */ + 0, /* rightshift */ + 1, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ABS16", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* .xword: (S+A-P) */ + HOWTO (R_AARCH64_PREL64, /* type */ + 0, /* rightshift */ + 4, /* size (4 = long long) */ + 64, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_PREL64", /* name */ + FALSE, /* partial_inplace */ + ALL_ONES, /* src_mask */ + ALL_ONES, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* .word: (S+A-P) */ + HOWTO (R_AARCH64_PREL32, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 32, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_PREL32", /* name */ + FALSE, /* partial_inplace */ + 0xffffffff, /* src_mask */ + 0xffffffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* .half: (S+A-P) */ + HOWTO (R_AARCH64_PREL16, /* type */ + 0, /* rightshift */ + 1, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* 
complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_PREL16", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* Group relocations to create a 16, 32, 48 or 64 bit + unsigned data or abs address inline. */ + + /* MOVZ: ((S+A) >> 0) & 0xffff */ + HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_UABS_G0", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */ + HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_UABS_G0_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOVZ: ((S+A) >> 16) & 0xffff */ + HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */ + 16, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_UABS_G1", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */ + HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */ + 16, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_UABS_G1_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOVZ: ((S+A) >> 32) & 0xffff */ + HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */ + 32, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_UABS_G2", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */ + HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */ + 32, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_UABS_G2_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOVZ: ((S+A) >> 48) & 0xffff */ + HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */ + 48, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_unsigned, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + 
"R_AARCH64_MOVW_UABS_G3", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* Group relocations to create high part of a 16, 32, 48 or 64 bit + signed data or abs address inline. Will change instruction + to MOVN or MOVZ depending on sign of calculated value. */ + + /* MOV[ZN]: ((S+A) >> 0) & 0xffff */ + HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_SABS_G0", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOV[ZN]: ((S+A) >> 16) & 0xffff */ + HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */ + 16, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_SABS_G1", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* MOV[ZN]: ((S+A) >> 32) & 0xffff */ + HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */ + 32, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_MOVW_SABS_G2", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + +/* Relocations to generate 19, 21 and 33 bit PC-relative load/store + addresses: PG(x) is (x & ~0xfff). 
*/ + + /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */ + HOWTO (R_AARCH64_LD_PREL_LO19, /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 19, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_LD_PREL_LO19", /* name */ + FALSE, /* partial_inplace */ + 0x7ffff, /* src_mask */ + 0x7ffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* ADR: (S+A-P) & 0x1fffff */ + HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ADR_PREL_LO21", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ + HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */ + 12, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ADR_PREL_PG_HI21", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */ + HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */ + 12, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* ADD: (S+A) & 0xfff [no overflow check] */ + HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 10, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ADD_ABS_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0x3ffc00, /* src_mask */ + 0x3ffc00, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* LD/ST8: (S+A) & 0xfff */ + HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_LDST8_ABS_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xfff, /* src_mask */ + 0xfff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* Relocations for control-flow instructions. 
*/ + + /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */ + HOWTO (R_AARCH64_TSTBR14, /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 14, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TSTBR14", /* name */ + FALSE, /* partial_inplace */ + 0x3fff, /* src_mask */ + 0x3fff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* B.cond: ((S+A-P) >> 2) & 0x7ffff */ + HOWTO (R_AARCH64_CONDBR19, /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 19, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_CONDBR19", /* name */ + FALSE, /* partial_inplace */ + 0x7ffff, /* src_mask */ + 0x7ffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + EMPTY_HOWTO (281), + + /* B: ((S+A-P) >> 2) & 0x3ffffff */ + HOWTO (R_AARCH64_JUMP26, /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 26, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_JUMP26", /* name */ + FALSE, /* partial_inplace */ + 0x3ffffff, /* src_mask */ + 0x3ffffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* BL: ((S+A-P) >> 2) & 0x3ffffff */ + HOWTO (R_AARCH64_CALL26, /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 26, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_signed, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_CALL26", /* name */ + FALSE, /* partial_inplace */ + 0x3ffffff, /* src_mask */ + 0x3ffffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* LD/ST16: (S+A) & 0xffe */ + HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */ + 1, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_LDST16_ABS_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffe, /* src_mask */ + 0xffe, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* LD/ST32: (S+A) & 0xffc */ + HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */ + 2, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_LDST32_ABS_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffc, /* src_mask */ + 0xffc, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* LD/ST64: (S+A) & 0xff8 */ + HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */ + 3, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_LDST64_ABS_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xff8, /* src_mask */ + 0xff8, /* dst_mask */ + FALSE), /* pcrel_offset */ + + EMPTY_HOWTO (287), + EMPTY_HOWTO (288), + EMPTY_HOWTO (289), + EMPTY_HOWTO (290), + EMPTY_HOWTO (291), + EMPTY_HOWTO (292), + EMPTY_HOWTO (293), + EMPTY_HOWTO (294), + EMPTY_HOWTO (295), + EMPTY_HOWTO (296), + EMPTY_HOWTO (297), + EMPTY_HOWTO (298), + + 
/* LD/ST128: (S+A) & 0xff0 */ + HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */ + 4, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_LDST128_ABS_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xff0, /* src_mask */ + 0xff0, /* dst_mask */ + FALSE), /* pcrel_offset */ + + EMPTY_HOWTO (300), + EMPTY_HOWTO (301), + EMPTY_HOWTO (302), + EMPTY_HOWTO (303), + EMPTY_HOWTO (304), + EMPTY_HOWTO (305), + EMPTY_HOWTO (306), + EMPTY_HOWTO (307), + EMPTY_HOWTO (308), + EMPTY_HOWTO (309), + EMPTY_HOWTO (310), + + /* Get to the page for the GOT entry for the symbol + (G(S) - P) using an ADRP instruction. */ + HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */ + 12, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_ADR_GOT_PAGE", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* LD64: GOT offset G(S) & 0xff8 */ + HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */ + 3, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_LD64_GOT_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xff8, /* src_mask */ + 0xff8, /* dst_mask */ + FALSE) /* pcrel_offset */ +}; + +static reloc_howto_type elf64_aarch64_tls_howto_table[] = +{ + EMPTY_HOWTO (512), + + /* Get to the page for the GOT entry for the symbol + (G(S) - P) using an ADRP instruction. 
*/ + HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */ + 12, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSGD_ADR_PAGE21", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */ + HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xfff, /* src_mask */ + 0xfff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + EMPTY_HOWTO (515), + EMPTY_HOWTO (516), + EMPTY_HOWTO (517), + EMPTY_HOWTO (518), + EMPTY_HOWTO (519), + EMPTY_HOWTO (520), + EMPTY_HOWTO (521), + EMPTY_HOWTO (522), + EMPTY_HOWTO (523), + EMPTY_HOWTO (524), + EMPTY_HOWTO (525), + EMPTY_HOWTO (526), + EMPTY_HOWTO (527), + EMPTY_HOWTO (528), + EMPTY_HOWTO (529), + EMPTY_HOWTO (530), + EMPTY_HOWTO (531), + EMPTY_HOWTO (532), + EMPTY_HOWTO (533), + EMPTY_HOWTO (534), + EMPTY_HOWTO (535), + EMPTY_HOWTO (536), + EMPTY_HOWTO (537), + EMPTY_HOWTO (538), + + HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */ + 16, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 16, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 32, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */ + 12, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */ + 3, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xff8, /* src_mask */ + 0xff8, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* 
special_function */ + "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */ + FALSE, /* partial_inplace */ + 0x1ffffc, /* src_mask */ + 0x1ffffc, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */ + 8, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */ + 4, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */ + 4, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */ + 3, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */ + FALSE, /* partial_inplace */ + 0xfff, /* src_mask */ + 0xfff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */ + FALSE, /* partial_inplace */ + 0xfff, /* src_mask */ + 0xfff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow 
*/ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xfff, /* src_mask */ + 0xfff, /* dst_mask */ + FALSE), /* pcrel_offset */ +}; + +static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] = +{ + HOWTO (R_AARCH64_TLSDESC_LD64_PREL19, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_LD64_PREL19", /* name */ + FALSE, /* partial_inplace */ + 0x1ffffc, /* src_mask */ + 0x1ffffc, /* dst_mask */ + TRUE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_ADR_PREL21", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* Get to the page for the GOT entry for the symbol + (G(S) - P) using an ADRP instruction. */ + HOWTO (R_AARCH64_TLSDESC_ADR_PAGE, /* type */ + 12, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 21, /* bitsize */ + TRUE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_ADR_PAGE", /* name */ + FALSE, /* partial_inplace */ + 0x1fffff, /* src_mask */ + 0x1fffff, /* dst_mask */ + TRUE), /* pcrel_offset */ + + /* LD64: GOT offset G(S) & 0xfff. */ + HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */ + 3, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xfff, /* src_mask */ + 0xfff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + /* ADD: GOT offset G(S) & 0xfff. 
*/ + HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */ + FALSE, /* partial_inplace */ + 0xfff, /* src_mask */ + 0xfff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */ + 4, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_OFF_G1", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */ + FALSE, /* partial_inplace */ + 0xffff, /* src_mask */ + 0xffff, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSDESC_LDR, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_LDR", /* name */ + FALSE, /* partial_inplace */ + 0x0, /* src_mask */ + 0x0, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSDESC_ADD, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_ADD", /* name */ + FALSE, /* partial_inplace */ + 0x0, /* src_mask */ + 0x0, /* dst_mask */ + FALSE), /* pcrel_offset */ + + HOWTO (R_AARCH64_TLSDESC_CALL, /* type */ + 0, /* rightshift */ + 2, /* size (0 = byte, 1 = short, 2 = long) */ + 12, /* bitsize */ + FALSE, /* pc_relative */ + 0, /* bitpos */ + complain_overflow_dont, /* complain_on_overflow */ + bfd_elf_generic_reloc, /* special_function */ + "R_AARCH64_TLSDESC_CALL", /* name */ + FALSE, /* partial_inplace */ + 0x0, /* src_mask */ + 0x0, /* dst_mask */ + FALSE), /* pcrel_offset */ +}; + +static reloc_howto_type * +elf64_aarch64_howto_from_type (unsigned int r_type) +{ + if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max) + return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min]; + + if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max) + return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min]; + + if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max) + return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min]; + + if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max) + return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min]; + + switch (r_type) + { + case R_AARCH64_NONE: + return &elf64_aarch64_howto_none; + + } + bfd_set_error (bfd_error_bad_value); + return NULL; +} + +static void +elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc, + Elf_Internal_Rela *elf_reloc) +{ + unsigned int r_type; + + r_type = 
ELF64_R_TYPE (elf_reloc->r_info); + bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type); +} + +struct elf64_aarch64_reloc_map +{ + bfd_reloc_code_real_type bfd_reloc_val; + unsigned int elf_reloc_val; +}; + +/* All entries in this list must also be present in + elf64_aarch64_howto_table. */ +static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] = +{ + {BFD_RELOC_NONE, R_AARCH64_NONE}, + + /* Basic data relocations. */ + {BFD_RELOC_CTOR, R_AARCH64_ABS64}, + {BFD_RELOC_64, R_AARCH64_ABS64}, + {BFD_RELOC_32, R_AARCH64_ABS32}, + {BFD_RELOC_16, R_AARCH64_ABS16}, + {BFD_RELOC_64_PCREL, R_AARCH64_PREL64}, + {BFD_RELOC_32_PCREL, R_AARCH64_PREL32}, + {BFD_RELOC_16_PCREL, R_AARCH64_PREL16}, + + /* Group relocations to low order bits of a 16, 32, 48 or 64 bit + value inline. */ + {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC}, + {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC}, + {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC}, + + /* Group relocations to create high bits of a 16, 32, 48 or 64 bit + signed value inline. */ + {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0}, + {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1}, + {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2}, + + /* Group relocations to create high bits of a 16, 32, 48 or 64 bit + unsigned value inline. */ + {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0}, + {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1}, + {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2}, + {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3}, + + /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */ + {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19}, + {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21}, + {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21}, + {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC}, + {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC}, + {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC}, + {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC}, + {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC}, + {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC}, + {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC}, + + /* Relocations for control-flow instructions. */ + {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14}, + {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19}, + {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26}, + {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26}, + + /* Relocations for PIC. */ + {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE}, + {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC}, + + /* Relocations for TLS. 
*/ + {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21}, + {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC}, + {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1, + R_AARCH64_TLSIE_MOVW_GOTTPREL_G1}, + {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, + R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC}, + {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, + R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21}, + {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, + R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC}, + {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19, + R_AARCH64_TLSIE_LD_GOTTPREL_PREL19}, + {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2}, + {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1}, + {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC, + R_AARCH64_TLSLE_MOVW_TPREL_G1_NC}, + {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0}, + {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC, + R_AARCH64_TLSLE_MOVW_TPREL_G0_NC}, + {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12}, + {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12}, + {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC, + R_AARCH64_TLSLE_ADD_TPREL_LO12_NC}, + {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19}, + {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21}, + {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE}, + {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC}, + {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC}, + {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1}, + {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC}, + {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR}, + {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD}, + {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL}, + {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64}, + {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64}, + {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64}, + {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC}, +}; + +static reloc_howto_type * +elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, + bfd_reloc_code_real_type code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++) + if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code) + return elf64_aarch64_howto_from_type + (elf64_aarch64_reloc_map[i].elf_reloc_val); + + bfd_set_error (bfd_error_bad_value); + return NULL; +} + +static reloc_howto_type * +elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, + const char *r_name) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++) + if (elf64_aarch64_howto_table[i].name != NULL + && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0) + return &elf64_aarch64_howto_table[i]; + + return NULL; +} + +#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec +#define TARGET_LITTLE_NAME "elf64-littleaarch64" +#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec +#define TARGET_BIG_NAME "elf64-bigaarch64" + +typedef unsigned long int insn32; + +/* The linker script knows the section names for placement. + The entry_names are used to do simple name mangling on the stubs. + Given a function name, and its type, the stub can be found. The + name can be changed. The only requirement is the %s be present. */ +#define STUB_ENTRY_NAME "__%s_veneer" + +/* The name of the dynamic interpreter. This is put in the .interp + section. 
  */
+#define ELF_DYNAMIC_INTERPRETER     "/lib/ld.so.1"
+
+#define AARCH64_MAX_FWD_BRANCH_OFFSET \
+  (((1 << 25) - 1) << 2)
+#define AARCH64_MAX_BWD_BRANCH_OFFSET \
+  (-((1 << 25) << 2))
+
+#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
+#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
+
+static int
+aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
+{
+  bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
+  return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
+}
+
+static int
+aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
+{
+  bfd_signed_vma offset = (bfd_signed_vma) (value - place);
+  return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
+	  && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
+}
+
+static const uint32_t aarch64_adrp_branch_stub [] =
+{
+  0x90000010,			/* adrp ip0, X */
+				/* R_AARCH64_ADR_HI21_PCREL(X) */
+  0x91000210,			/* add ip0, ip0, :lo12:X */
+				/* R_AARCH64_ADD_ABS_LO12_NC(X) */
+  0xd61f0200,			/* br ip0 */
+};
+
+static const uint32_t aarch64_long_branch_stub[] =
+{
+  0x58000090,			/* ldr ip0, 1f */
+  0x10000011,			/* adr ip1, #0 */
+  0x8b110210,			/* add ip0, ip0, ip1 */
+  0xd61f0200,			/* br ip0 */
+  0x00000000,			/* 1: .xword
+				   R_AARCH64_PREL64(X) + 12
+				 */
+  0x00000000,
+};
+
+/* Section name for stubs is the associated section name plus this
+   string.  */
+#define STUB_SUFFIX ".stub"
+
+enum elf64_aarch64_stub_type
+{
+  aarch64_stub_none,
+  aarch64_stub_adrp_branch,
+  aarch64_stub_long_branch,
+};
+
+struct elf64_aarch64_stub_hash_entry
+{
+  /* Base hash table entry structure.  */
+  struct bfd_hash_entry root;
+
+  /* The stub section.  */
+  asection *stub_sec;
+
+  /* Offset within stub_sec of the beginning of this stub.  */
+  bfd_vma stub_offset;
+
+  /* Given the symbol's value and its section we can determine its final
+     value when building the stubs (so the stub knows where to jump).  */
+  bfd_vma target_value;
+  asection *target_section;
+
+  enum elf64_aarch64_stub_type stub_type;
+
+  /* The symbol table entry, if any, that this was derived from.  */
+  struct elf64_aarch64_link_hash_entry *h;
+
+  /* Destination symbol type.  */
+  unsigned char st_type;
+
+  /* Where this stub is being called from, or, in the case of combined
+     stub sections, the first input section in the group.  */
+  asection *id_sec;
+
+  /* The name for the local symbol at the start of this stub.  The
+     stub name in the hash table has to be unique; this does not, so
+     it can be friendlier.  */
+  char *output_name;
+};
+
+/* Used to build a map of a section.  This is required for mixed-endian
+   code/data.  */
+
+typedef struct elf64_elf_section_map
+{
+  bfd_vma vma;
+  char type;
+}
+elf64_aarch64_section_map;
+
+
+typedef struct _aarch64_elf_section_data
+{
+  struct bfd_elf_section_data elf;
+  unsigned int mapcount;
+  unsigned int mapsize;
+  elf64_aarch64_section_map *map;
+}
+_aarch64_elf_section_data;
+
+#define elf64_aarch64_section_data(sec) \
+  ((_aarch64_elf_section_data *) elf_section_data (sec))
+
+/* The size of the thread control block.  */
+#define TCB_SIZE 16
+
+struct elf_aarch64_local_symbol
+{
+  unsigned int got_type;
+  bfd_signed_vma got_refcount;
+  bfd_vma got_offset;
+
+  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The
+     offset is from the end of the jump table and reserved entries
+     within the PLTGOT.
+
+     The magic value (bfd_vma) -1 indicates that an offset has not
+     been allocated.  */
+  bfd_vma tlsdesc_got_jump_table_offset;
+};
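+
+/* A sketch (assuming the conventional PLTGOT layout) of how such an
+   offset is turned into a final PLTGOT address: the reserved entries
+   and the jump table slots come first, so
+
+     addr = address of .got.plt
+            + size of the reserved entries
+            + aarch64_compute_jump_table_size (htab)
+            + tlsdesc_got_jump_table_offset;  */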
+
+struct elf_aarch64_obj_tdata
+{
+  struct elf_obj_tdata root;
+
+  /* Local symbol descriptors.  */
+  struct elf_aarch64_local_symbol *locals;
+
+  /* Zero to warn when linking objects with incompatible enum sizes.  */
+  int no_enum_size_warning;
+
+  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
+  int no_wchar_size_warning;
+};
+
+#define elf_aarch64_tdata(bfd)				\
+  ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
+
+#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
+
+#define is_aarch64_elf(bfd)				\
+  (bfd_get_flavour (bfd) == bfd_target_elf_flavour	\
+   && elf_tdata (bfd) != NULL				\
+   && elf_object_id (bfd) == AARCH64_ELF_DATA)
+
+static bfd_boolean
+elf64_aarch64_mkobject (bfd *abfd)
+{
+  return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
+				  AARCH64_ELF_DATA);
+}
+
+/* The AArch64 linker needs to keep track of the number of relocs that it
+   decides to copy in check_relocs for each symbol.  This is so that
+   it can discard PC relative relocs if it doesn't need them when
+   linking with -Bsymbolic.  We store the information in a field
+   extending the regular ELF linker hash table.  */
+
+/* This structure keeps track of the number of relocs we have copied
+   for a given symbol.  */
+struct elf64_aarch64_relocs_copied
+{
+  /* Next section.  */
+  struct elf64_aarch64_relocs_copied *next;
+  /* A section in dynobj.  */
+  asection *section;
+  /* Number of relocs copied in this section.  */
+  bfd_size_type count;
+  /* Number of PC-relative relocs copied in this section.  */
+  bfd_size_type pc_count;
+};
+
+#define elf64_aarch64_hash_entry(ent) \
+  ((struct elf64_aarch64_link_hash_entry *)(ent))
+
+#define GOT_UNKNOWN    0
+#define GOT_NORMAL     1
+#define GOT_TLS_GD     2
+#define GOT_TLS_IE     4
+#define GOT_TLSDESC_GD 8
+
+#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
+
+/* AArch64 ELF linker hash entry.  */
+struct elf64_aarch64_link_hash_entry
+{
+  struct elf_link_hash_entry root;
+
+  /* Track dynamic relocs copied for this symbol.  */
+  struct elf_dyn_relocs *dyn_relocs;
+
+  /* Number of PC relative relocs copied for this symbol.  */
+  struct elf64_aarch64_relocs_copied *relocs_copied;
+
+  /* Since PLT entries have variable size, we need to record the
+     index into .got.plt instead of recomputing it from the PLT
+     offset.  */
+  bfd_signed_vma plt_got_offset;
+
+  /* Bit mask representing the type of GOT entry(s) if any required by
+     this symbol.  */
+  unsigned int got_type;
+
+  /* A pointer to the most recently used stub hash entry against this
+     symbol.  */
+  struct elf64_aarch64_stub_hash_entry *stub_cache;
+
+  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The offset
+     is from the end of the jump table and reserved entries within the PLTGOT.
+
+     The magic value (bfd_vma) -1 indicates that an offset has not
+     been allocated.  */
+  bfd_vma tlsdesc_got_jump_table_offset;
+};
+
+static unsigned int
+elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
+			       bfd *abfd,
+			       unsigned long r_symndx)
+{
+  if (h)
+    return elf64_aarch64_hash_entry (h)->got_type;
+
+  if (! elf64_aarch64_locals (abfd))
+    return GOT_UNKNOWN;
+
+  return elf64_aarch64_locals (abfd)[r_symndx].got_type;
+}
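+
+/* For example, a symbol referenced through both a traditional GD
+   sequence and a TLS descriptor sequence would have got_type ==
+   (GOT_TLS_GD | GOT_TLSDESC_GD), for which GOT_TLS_GD_ANY_P is
+   true.  */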
+
+/* Traverse an AArch64 ELF linker hash table.  */
+#define elf64_aarch64_link_hash_traverse(table, func, info)		\
+  (elf_link_hash_traverse						\
+   (&(table)->root,							\
+    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
+    (info)))
+
+/* Get the AArch64 elf linker hash table from a link_info structure.  */
+#define elf64_aarch64_hash_table(info)					\
+  ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
+
+#define aarch64_stub_hash_lookup(table, string, create, copy)		\
+  ((struct elf64_aarch64_stub_hash_entry *)				\
+   bfd_hash_lookup ((table), (string), (create), (copy)))
+
+/* AArch64 ELF linker hash table.  */
+struct elf64_aarch64_link_hash_table
+{
+  /* The main hash table.  */
+  struct elf_link_hash_table root;
+
+  /* Nonzero to force PIC branch veneers.  */
+  int pic_veneer;
+
+  /* The number of bytes in the initial entry in the PLT.  */
+  bfd_size_type plt_header_size;
+
+  /* The number of bytes in the subsequent PLT entries.  */
+  bfd_size_type plt_entry_size;
+
+  /* Short-cuts to get to dynamic linker sections.  */
+  asection *sdynbss;
+  asection *srelbss;
+
+  /* Small local sym cache.  */
+  struct sym_cache sym_cache;
+
+  /* For convenience in allocate_dynrelocs.  */
+  bfd *obfd;
+
+  /* The amount of space used by the reserved portion of the sgotplt
+     section, plus whatever space is used by the jump slots.  */
+  bfd_vma sgotplt_jump_table_size;
+
+  /* The stub hash table.  */
+  struct bfd_hash_table stub_hash_table;
+
+  /* Linker stub bfd.  */
+  bfd *stub_bfd;
+
+  /* Linker call-backs.  */
+  asection *(*add_stub_section) (const char *, asection *);
+  void (*layout_sections_again) (void);
+
+  /* Array to keep track of which stub sections have been created, and
+     information on stub grouping.  */
+  struct map_stub
+  {
+    /* This is the section to which stubs in the group will be
+       attached.  */
+    asection *link_sec;
+    /* The stub section.  */
+    asection *stub_sec;
+  } *stub_group;
+
+  /* Assorted information used by elf64_aarch64_size_stubs.  */
+  unsigned int bfd_count;
+  int top_index;
+  asection **input_list;
+
+  /* The offset into splt of the PLT entry for the TLS descriptor
+     resolver.  Special values are 0, if not necessary (or not found
+     to be necessary yet), and -1 if needed but not determined
+     yet.  */
+  bfd_vma tlsdesc_plt;
+
+  /* The GOT offset for the lazy trampoline.  Communicated to the
+     loader via DT_TLSDESC_GOT.  The magic value (bfd_vma) -1
+     indicates an offset is not allocated.  */
+  bfd_vma dt_tlsdesc_got;
+};
+
+
+/* Return non-zero if the indicated VALUE has overflowed the maximum
+   range expressible by an unsigned number with the indicated number of
+   BITS.  */
+
+static bfd_reloc_status_type
+aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
+{
+  bfd_vma lim;
+  if (bits >= sizeof (bfd_vma) * 8)
+    return bfd_reloc_ok;
+  lim = (bfd_vma) 1 << bits;
+  if (value >= lim)
+    return bfd_reloc_overflow;
+  return bfd_reloc_ok;
+}
+
+
+/* Return non-zero if the indicated VALUE has overflowed the maximum
+   range expressible by a signed number with the indicated number of
+   BITS.  */
+
+static bfd_reloc_status_type
+aarch64_signed_overflow (bfd_vma value, unsigned int bits)
+{
+  bfd_signed_vma svalue = (bfd_signed_vma) value;
+  bfd_signed_vma lim;
+
+  if (bits >= sizeof (bfd_vma) * 8)
+    return bfd_reloc_ok;
+  lim = (bfd_signed_vma) 1 << (bits - 1);
+  if (svalue < -lim || svalue >= lim)
+    return bfd_reloc_overflow;
+  return bfd_reloc_ok;
+}
+
+/* Create an entry in an AArch64 ELF linker hash table.
*/ + +static struct bfd_hash_entry * +elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry, + struct bfd_hash_table *table, + const char *string) +{ + struct elf64_aarch64_link_hash_entry *ret = + (struct elf64_aarch64_link_hash_entry *) entry; + + /* Allocate the structure if it has not already been allocated by a + subclass. */ + if (ret == NULL) + ret = bfd_hash_allocate (table, + sizeof (struct elf64_aarch64_link_hash_entry)); + if (ret == NULL) + return (struct bfd_hash_entry *) ret; + + /* Call the allocation method of the superclass. */ + ret = ((struct elf64_aarch64_link_hash_entry *) + _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret, + table, string)); + if (ret != NULL) + { + ret->dyn_relocs = NULL; + ret->relocs_copied = NULL; + ret->got_type = GOT_UNKNOWN; + ret->plt_got_offset = (bfd_vma) - 1; + ret->stub_cache = NULL; + ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1; + } + + return (struct bfd_hash_entry *) ret; +} + +/* Initialize an entry in the stub hash table. */ + +static struct bfd_hash_entry * +stub_hash_newfunc (struct bfd_hash_entry *entry, + struct bfd_hash_table *table, const char *string) +{ + /* Allocate the structure if it has not already been allocated by a + subclass. */ + if (entry == NULL) + { + entry = bfd_hash_allocate (table, + sizeof (struct + elf64_aarch64_stub_hash_entry)); + if (entry == NULL) + return entry; + } + + /* Call the allocation method of the superclass. */ + entry = bfd_hash_newfunc (entry, table, string); + if (entry != NULL) + { + struct elf64_aarch64_stub_hash_entry *eh; + + /* Initialize the local fields. */ + eh = (struct elf64_aarch64_stub_hash_entry *) entry; + eh->stub_sec = NULL; + eh->stub_offset = 0; + eh->target_value = 0; + eh->target_section = NULL; + eh->stub_type = aarch64_stub_none; + eh->h = NULL; + eh->id_sec = NULL; + } + + return entry; +} + + +/* Copy the extra info we tack onto an elf_link_hash_entry. */ + +static void +elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info, + struct elf_link_hash_entry *dir, + struct elf_link_hash_entry *ind) +{ + struct elf64_aarch64_link_hash_entry *edir, *eind; + + edir = (struct elf64_aarch64_link_hash_entry *) dir; + eind = (struct elf64_aarch64_link_hash_entry *) ind; + + if (eind->dyn_relocs != NULL) + { + if (edir->dyn_relocs != NULL) + { + struct elf_dyn_relocs **pp; + struct elf_dyn_relocs *p; + + /* Add reloc counts against the indirect sym to the direct sym + list. Merge any entries against the same section. */ + for (pp = &eind->dyn_relocs; (p = *pp) != NULL;) + { + struct elf_dyn_relocs *q; + + for (q = edir->dyn_relocs; q != NULL; q = q->next) + if (q->sec == p->sec) + { + q->pc_count += p->pc_count; + q->count += p->count; + *pp = p->next; + break; + } + if (q == NULL) + pp = &p->next; + } + *pp = edir->dyn_relocs; + } + + edir->dyn_relocs = eind->dyn_relocs; + eind->dyn_relocs = NULL; + } + + if (eind->relocs_copied != NULL) + { + if (edir->relocs_copied != NULL) + { + struct elf64_aarch64_relocs_copied **pp; + struct elf64_aarch64_relocs_copied *p; + + /* Add reloc counts against the indirect sym to the direct sym + list. Merge any entries against the same section. 
*/ + for (pp = &eind->relocs_copied; (p = *pp) != NULL;) + { + struct elf64_aarch64_relocs_copied *q; + + for (q = edir->relocs_copied; q != NULL; q = q->next) + if (q->section == p->section) + { + q->pc_count += p->pc_count; + q->count += p->count; + *pp = p->next; + break; + } + if (q == NULL) + pp = &p->next; + } + *pp = edir->relocs_copied; + } + + edir->relocs_copied = eind->relocs_copied; + eind->relocs_copied = NULL; + } + + if (ind->root.type == bfd_link_hash_indirect) + { + /* Copy over PLT info. */ + if (dir->got.refcount <= 0) + { + edir->got_type = eind->got_type; + eind->got_type = GOT_UNKNOWN; + } + } + + _bfd_elf_link_hash_copy_indirect (info, dir, ind); +} + +/* Create an AArch64 elf linker hash table. */ + +static struct bfd_link_hash_table * +elf64_aarch64_link_hash_table_create (bfd *abfd) +{ + struct elf64_aarch64_link_hash_table *ret; + bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table); + + ret = bfd_malloc (amt); + if (ret == NULL) + return NULL; + + if (!_bfd_elf_link_hash_table_init + (&ret->root, abfd, elf64_aarch64_link_hash_newfunc, + sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA)) + { + free (ret); + return NULL; + } + + ret->sdynbss = NULL; + ret->srelbss = NULL; + + ret->plt_header_size = PLT_ENTRY_SIZE; + ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE; + + ret->sym_cache.abfd = NULL; + ret->obfd = abfd; + + ret->stub_bfd = NULL; + ret->add_stub_section = NULL; + ret->layout_sections_again = NULL; + ret->stub_group = NULL; + ret->bfd_count = 0; + ret->top_index = 0; + ret->input_list = NULL; + ret->tlsdesc_plt = 0; + ret->dt_tlsdesc_got = (bfd_vma) - 1; + + if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc, + sizeof (struct elf64_aarch64_stub_hash_entry))) + { + free (ret); + return NULL; + } + + return &ret->root.root; +} + +/* Free the derived linker hash table. 
*/ + +static void +elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash) +{ + struct elf64_aarch64_link_hash_table *ret + = (struct elf64_aarch64_link_hash_table *) hash; + + bfd_hash_table_free (&ret->stub_hash_table); + _bfd_generic_link_hash_table_free (hash); +} + +static bfd_vma +aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value, + bfd_vma addend, bfd_boolean weak_undef_p) +{ + switch (r_type) + { + case R_AARCH64_TLSDESC_CALL: + case R_AARCH64_NONE: + case R_AARCH64_NULL: + break; + + case R_AARCH64_ADR_PREL_LO21: + case R_AARCH64_CONDBR19: + case R_AARCH64_LD_PREL_LO19: + case R_AARCH64_PREL16: + case R_AARCH64_PREL32: + case R_AARCH64_PREL64: + case R_AARCH64_TSTBR14: + if (weak_undef_p) + value = place; + value = value + addend - place; + break; + + case R_AARCH64_CALL26: + case R_AARCH64_JUMP26: + value = value + addend - place; + break; + + case R_AARCH64_ABS16: + case R_AARCH64_ABS32: + case R_AARCH64_MOVW_SABS_G0: + case R_AARCH64_MOVW_SABS_G1: + case R_AARCH64_MOVW_SABS_G2: + case R_AARCH64_MOVW_UABS_G0: + case R_AARCH64_MOVW_UABS_G0_NC: + case R_AARCH64_MOVW_UABS_G1: + case R_AARCH64_MOVW_UABS_G1_NC: + case R_AARCH64_MOVW_UABS_G2: + case R_AARCH64_MOVW_UABS_G2_NC: + case R_AARCH64_MOVW_UABS_G3: + value = value + addend; + break; + + case R_AARCH64_ADR_PREL_PG_HI21: + case R_AARCH64_ADR_PREL_PG_HI21_NC: + if (weak_undef_p) + value = PG (place); + value = PG (value + addend) - PG (place); + break; + + case R_AARCH64_ADR_GOT_PAGE: + case R_AARCH64_TLSDESC_ADR_PAGE: + case R_AARCH64_TLSGD_ADR_PAGE21: + case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: + value = PG (value + addend) - PG (place); + break; + + case R_AARCH64_ADD_ABS_LO12_NC: + case R_AARCH64_LD64_GOT_LO12_NC: + case R_AARCH64_LDST8_ABS_LO12_NC: + case R_AARCH64_LDST16_ABS_LO12_NC: + case R_AARCH64_LDST32_ABS_LO12_NC: + case R_AARCH64_LDST64_ABS_LO12_NC: + case R_AARCH64_LDST128_ABS_LO12_NC: + case R_AARCH64_TLSDESC_ADD_LO12_NC: + case R_AARCH64_TLSDESC_ADD: + case R_AARCH64_TLSDESC_LD64_LO12_NC: + case R_AARCH64_TLSDESC_LDR: + case R_AARCH64_TLSGD_ADD_LO12_NC: + case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + case R_AARCH64_TLSLE_ADD_TPREL_LO12: + case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: + value = PG_OFFSET (value + addend); + break; + + case R_AARCH64_TLSLE_MOVW_TPREL_G1: + case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: + value = (value + addend) & (bfd_vma) 0xffff0000; + break; + case R_AARCH64_TLSLE_ADD_TPREL_HI12: + value = (value + addend) & (bfd_vma) 0xfff000; + break; + + case R_AARCH64_TLSLE_MOVW_TPREL_G0: + case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: + value = (value + addend) & (bfd_vma) 0xffff; + break; + + case R_AARCH64_TLSLE_MOVW_TPREL_G2: + value = (value + addend) & ~(bfd_vma) 0xffffffff; + value -= place & ~(bfd_vma) 0xffffffff; + break; + } + return value; +} + +static bfd_boolean +aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section, + bfd_vma offset, bfd_vma value) +{ + reloc_howto_type *howto; + bfd_vma place; + + howto = elf64_aarch64_howto_from_type (r_type); + place = (input_section->output_section->vma + input_section->output_offset + + offset); + value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE); + return bfd_elf_aarch64_put_addend (input_bfd, + input_section->contents + offset, + howto, value); +} + +static enum elf64_aarch64_stub_type +aarch64_select_branch_stub (bfd_vma value, bfd_vma place) +{ + if (aarch64_valid_for_adrp_p (value, place)) + return aarch64_stub_adrp_branch; + return aarch64_stub_long_branch; +} + +/* Determine 
the type of stub needed, if any, for a call. */ + +static enum elf64_aarch64_stub_type +aarch64_type_of_stub (struct bfd_link_info *info, + asection *input_sec, + const Elf_Internal_Rela *rel, + unsigned char st_type, + struct elf64_aarch64_link_hash_entry *hash, + bfd_vma destination) +{ + bfd_vma location; + bfd_signed_vma branch_offset; + unsigned int r_type; + struct elf64_aarch64_link_hash_table *globals; + enum elf64_aarch64_stub_type stub_type = aarch64_stub_none; + bfd_boolean via_plt_p; + + if (st_type != STT_FUNC) + return stub_type; + + globals = elf64_aarch64_hash_table (info); + via_plt_p = (globals->root.splt != NULL && hash != NULL + && hash->root.plt.offset != (bfd_vma) - 1); + + if (via_plt_p) + return stub_type; + + /* Determine where the call point is. */ + location = (input_sec->output_offset + + input_sec->output_section->vma + rel->r_offset); + + branch_offset = (bfd_signed_vma) (destination - location); + + r_type = ELF64_R_TYPE (rel->r_info); + + /* We don't want to redirect any old unconditional jump in this way, + only one which is being used for a sibcall, where it is + acceptable for the IP0 and IP1 registers to be clobbered. */ + if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26) + && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET + || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET)) + { + stub_type = aarch64_stub_long_branch; + } + + return stub_type; +} + +/* Build a name for an entry in the stub hash table. */ + +static char * +elf64_aarch64_stub_name (const asection *input_section, + const asection *sym_sec, + const struct elf64_aarch64_link_hash_entry *hash, + const Elf_Internal_Rela *rel) +{ + char *stub_name; + bfd_size_type len; + + if (hash) + { + len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1; + stub_name = bfd_malloc (len); + if (stub_name != NULL) + snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x", + (unsigned int) input_section->id, + hash->root.root.root.string, + rel->r_addend); + } + else + { + len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1; + stub_name = bfd_malloc (len); + if (stub_name != NULL) + snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x", + (unsigned int) input_section->id, + (unsigned int) sym_sec->id, + (unsigned int) ELF64_R_SYM (rel->r_info), + rel->r_addend); + } + + return stub_name; +} + +/* Look up an entry in the stub hash. Stub entries are cached because + creating the stub name takes a bit of time. */ + +static struct elf64_aarch64_stub_hash_entry * +elf64_aarch64_get_stub_entry (const asection *input_section, + const asection *sym_sec, + struct elf_link_hash_entry *hash, + const Elf_Internal_Rela *rel, + struct elf64_aarch64_link_hash_table *htab) +{ + struct elf64_aarch64_stub_hash_entry *stub_entry; + struct elf64_aarch64_link_hash_entry *h = + (struct elf64_aarch64_link_hash_entry *) hash; + const asection *id_sec; + + if ((input_section->flags & SEC_CODE) == 0) + return NULL; + + /* If this input section is part of a group of sections sharing one + stub section, then use the id of the first section in the group. + Stub names need to include a section id, as there may well be + more than one stub used to reach say, printf, and we need to + distinguish between them. 
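+
+ (Example, not part of the original patch: with the format strings
+ above, a stub for the global symbol "printf" reached from a group
+ whose first section has id 0x2d, with addend 0, is named
+ "0000002d_printf+0", while a stub for local symbol index 5 defined
+ in section id 0x11 is named "0000002d_11:5+0".)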
*/ + id_sec = htab->stub_group[input_section->id].link_sec; + + if (h != NULL && h->stub_cache != NULL + && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec) + { + stub_entry = h->stub_cache; + } + else + { + char *stub_name; + + stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel); + if (stub_name == NULL) + return NULL; + + stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, + stub_name, FALSE, FALSE); + if (h != NULL) + h->stub_cache = stub_entry; + + free (stub_name); + } + + return stub_entry; +} + +/* Add a new stub entry to the stub hash. Not all fields of the new + stub entry are initialised. */ + +static struct elf64_aarch64_stub_hash_entry * +elf64_aarch64_add_stub (const char *stub_name, + asection *section, + struct elf64_aarch64_link_hash_table *htab) +{ + asection *link_sec; + asection *stub_sec; + struct elf64_aarch64_stub_hash_entry *stub_entry; + + link_sec = htab->stub_group[section->id].link_sec; + stub_sec = htab->stub_group[section->id].stub_sec; + if (stub_sec == NULL) + { + stub_sec = htab->stub_group[link_sec->id].stub_sec; + if (stub_sec == NULL) + { + size_t namelen; + bfd_size_type len; + char *s_name; + + namelen = strlen (link_sec->name); + len = namelen + sizeof (STUB_SUFFIX); + s_name = bfd_alloc (htab->stub_bfd, len); + if (s_name == NULL) + return NULL; + + memcpy (s_name, link_sec->name, namelen); + memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX)); + stub_sec = (*htab->add_stub_section) (s_name, link_sec); + if (stub_sec == NULL) + return NULL; + htab->stub_group[link_sec->id].stub_sec = stub_sec; + } + htab->stub_group[section->id].stub_sec = stub_sec; + } + + /* Enter this entry into the linker stub hash table. */ + stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name, + TRUE, FALSE); + if (stub_entry == NULL) + { + (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), + section->owner, stub_name); + return NULL; + } + + stub_entry->stub_sec = stub_sec; + stub_entry->stub_offset = 0; + stub_entry->id_sec = link_sec; + + return stub_entry; +} + +static bfd_boolean +aarch64_build_one_stub (struct bfd_hash_entry *gen_entry, + void *in_arg ATTRIBUTE_UNUSED) +{ + struct elf64_aarch64_stub_hash_entry *stub_entry; + asection *stub_sec; + bfd *stub_bfd; + bfd_byte *loc; + bfd_vma sym_value; + unsigned int template_size; + const uint32_t *template; + unsigned int i; + + /* Massage our args to the form they really have. */ + stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry; + + stub_sec = stub_entry->stub_sec; + + /* Make a note of the offset within the stubs for this entry. */ + stub_entry->stub_offset = stub_sec->size; + loc = stub_sec->contents + stub_entry->stub_offset; + + stub_bfd = stub_sec->owner; + + /* This is the address of the stub destination. */ + sym_value = (stub_entry->target_value + + stub_entry->target_section->output_offset + + stub_entry->target_section->output_section->vma); + + if (stub_entry->stub_type == aarch64_stub_long_branch) + { + bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma + + stub_sec->output_offset); + + /* See if we can relax the stub. 
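+
+ (Sketch of the relaxation, not part of the original patch; the
+ templates live elsewhere in this file and are shown only roughly.
+ The long-branch stub materialises a full 64-bit address:
+
+ ldr ip0, 1f
+ adr ip1, #0
+ add ip0, ip0, ip1
+ br ip0
+ 1: .xword <pc-relative target>
+
+ whereas when the destination page is within the +/-4GiB reach of
+ adrp, the cheaper aarch64_stub_adrp_branch form
+
+ adrp ip0, <target page>
+ add ip0, ip0, :lo12:<target>
+ br ip0
+
+ is selected instead.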
*/ + if (aarch64_valid_for_adrp_p (sym_value, place)) + stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place); + } + + switch (stub_entry->stub_type) + { + case aarch64_stub_adrp_branch: + template = aarch64_adrp_branch_stub; + template_size = sizeof (aarch64_adrp_branch_stub); + break; + case aarch64_stub_long_branch: + template = aarch64_long_branch_stub; + template_size = sizeof (aarch64_long_branch_stub); + break; + default: + BFD_FAIL (); + return FALSE; + } + + for (i = 0; i < (template_size / sizeof template[0]); i++) + { + bfd_putl32 (template[i], loc); + loc += 4; + } + + template_size = (template_size + 7) & ~7; + stub_sec->size += template_size; + + switch (stub_entry->stub_type) + { + case aarch64_stub_adrp_branch: + if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec, + stub_entry->stub_offset, sym_value)) + /* The stub would not have been relaxed if the offset was out + of range. */ + BFD_FAIL (); + + _bfd_final_link_relocate + (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC), + stub_bfd, + stub_sec, + stub_sec->contents, + stub_entry->stub_offset + 4, + sym_value, + 0); + break; + + case aarch64_stub_long_branch: + /* We want the value relative to the address 12 bytes back from the + value itself. */ + _bfd_final_link_relocate (elf64_aarch64_howto_from_type + (R_AARCH64_PREL64), stub_bfd, stub_sec, + stub_sec->contents, + stub_entry->stub_offset + 16, + sym_value + 12, 0); + break; + default: + break; + } + + return TRUE; +} + +/* As above, but don't actually build the stub. Just bump offset so + we know stub section sizes. */ + +static bfd_boolean +aarch64_size_one_stub (struct bfd_hash_entry *gen_entry, + void *in_arg ATTRIBUTE_UNUSED) +{ + struct elf64_aarch64_stub_hash_entry *stub_entry; + int size; + + /* Massage our args to the form they really have. */ + stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry; + + switch (stub_entry->stub_type) + { + case aarch64_stub_adrp_branch: + size = sizeof (aarch64_adrp_branch_stub); + break; + case aarch64_stub_long_branch: + size = sizeof (aarch64_long_branch_stub); + break; + default: + BFD_FAIL (); + return FALSE; + break; + } + + size = (size + 7) & ~7; + stub_entry->stub_sec->size += size; + return TRUE; +} + +/* External entry points for sizing and building linker stubs. */ + +/* Set up various things so that we can make a list of input sections + for each output section included in the link. Returns -1 on error, + 0 when no stubs will be needed, and 1 on success. */ + +int +elf64_aarch64_setup_section_lists (bfd *output_bfd, + struct bfd_link_info *info) +{ + bfd *input_bfd; + unsigned int bfd_count; + int top_id, top_index; + asection *section; + asection **input_list, **list; + bfd_size_type amt; + struct elf64_aarch64_link_hash_table *htab = + elf64_aarch64_hash_table (info); + + if (!is_elf_hash_table (htab)) + return 0; + + /* Count the number of input BFDs and find the top input section id. 
*/ + for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0; + input_bfd != NULL; input_bfd = input_bfd->link_next) + { + bfd_count += 1; + for (section = input_bfd->sections; + section != NULL; section = section->next) + { + if (top_id < section->id) + top_id = section->id; + } + } + htab->bfd_count = bfd_count; + + amt = sizeof (struct map_stub) * (top_id + 1); + htab->stub_group = bfd_zmalloc (amt); + if (htab->stub_group == NULL) + return -1; + + /* We can't use output_bfd->section_count here to find the top output + section index as some sections may have been removed, and + _bfd_strip_section_from_output doesn't renumber the indices. */ + for (section = output_bfd->sections, top_index = 0; + section != NULL; section = section->next) + { + if (top_index < section->index) + top_index = section->index; + } + + htab->top_index = top_index; + amt = sizeof (asection *) * (top_index + 1); + input_list = bfd_malloc (amt); + htab->input_list = input_list; + if (input_list == NULL) + return -1; + + /* For sections we aren't interested in, mark their entries with a + value we can check later. */ + list = input_list + top_index; + do + *list = bfd_abs_section_ptr; + while (list-- != input_list); + + for (section = output_bfd->sections; + section != NULL; section = section->next) + { + if ((section->flags & SEC_CODE) != 0) + input_list[section->index] = NULL; + } + + return 1; +} + +/* Used by elf64_aarch64_next_input_section and group_sections. */ +#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec) + +/* The linker repeatedly calls this function for each input section, + in the order that input sections are linked into output sections. + Build lists of input sections to determine groupings between which + we may insert linker stubs. */ + +void +elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec) +{ + struct elf64_aarch64_link_hash_table *htab = + elf64_aarch64_hash_table (info); + + if (isec->output_section->index <= htab->top_index) + { + asection **list = htab->input_list + isec->output_section->index; + + if (*list != bfd_abs_section_ptr) + { + /* Steal the link_sec pointer for our list. */ + /* This happens to make the list in reverse order, + which is what we want. */ + PREV_SEC (isec) = *list; + *list = isec; + } + } +} + +/* See whether we can group stub sections together. Grouping stub + sections may result in fewer stubs. More importantly, we need to + put all .init* and .fini* stubs at the beginning of the .init or + .fini output sections respectively, because glibc splits the + _init and _fini functions into multiple parts. Putting a stub in + the middle of a function is not a good idea. */ + +static void +group_sections (struct elf64_aarch64_link_hash_table *htab, + bfd_size_type stub_group_size, + bfd_boolean stubs_always_before_branch) +{ + asection **list = htab->input_list + htab->top_index; + + do + { + asection *tail = *list; + + if (tail == bfd_abs_section_ptr) + continue; + + while (tail != NULL) + { + asection *curr; + asection *prev; + bfd_size_type total; + + curr = tail; + total = tail->size; + while ((prev = PREV_SEC (curr)) != NULL + && ((total += curr->output_offset - prev->output_offset) + < stub_group_size)) + curr = prev; + + /* OK, the size from the start of CURR to the end is less + than stub_group_size and thus can be handled by one stub + section. (Or the tail section is itself larger than + stub_group_size, in which case we may be toast.) 
+ We should really be keeping track of the total size of
+ stubs added here, as stubs contribute to the final output
+ section size. */
+ do
+ {
+ prev = PREV_SEC (tail);
+ /* Set up this stub group. */
+ htab->stub_group[tail->id].link_sec = curr;
+ }
+ while (tail != curr && (tail = prev) != NULL);
+
+ /* But wait, there's more! Input sections up to stub_group_size
+ bytes before the stub section can be handled by it too. */
+ if (!stubs_always_before_branch)
+ {
+ total = 0;
+ while (prev != NULL
+ && ((total += tail->output_offset - prev->output_offset)
+ < stub_group_size))
+ {
+ tail = prev;
+ prev = PREV_SEC (tail);
+ htab->stub_group[tail->id].link_sec = curr;
+ }
+ }
+ tail = prev;
+ }
+ }
+ while (list-- != htab->input_list);
+
+ free (htab->input_list);
+}
+
+#undef PREV_SEC
+
+/* Determine and set the size of the stub section for a final link.
+
+ The basic idea here is to examine all the relocations looking for
+ PC-relative calls to a target that is unreachable with a "bl"
+ instruction. */
+
+bfd_boolean
+elf64_aarch64_size_stubs (bfd *output_bfd,
+ bfd *stub_bfd,
+ struct bfd_link_info *info,
+ bfd_signed_vma group_size,
+ asection * (*add_stub_section) (const char *,
+ asection *),
+ void (*layout_sections_again) (void))
+{
+ bfd_size_type stub_group_size;
+ bfd_boolean stubs_always_before_branch;
+ bfd_boolean stub_changed = 0;
+ struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
+
+ /* Propagate mach to stub bfd, because it may not have been
+ finalized when we created stub_bfd. */
+ bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
+ bfd_get_mach (output_bfd));
+
+ /* Stash our params away. */
+ htab->stub_bfd = stub_bfd;
+ htab->add_stub_section = add_stub_section;
+ htab->layout_sections_again = layout_sections_again;
+ stubs_always_before_branch = group_size < 0;
+ if (group_size < 0)
+ stub_group_size = -group_size;
+ else
+ stub_group_size = group_size;
+
+ if (stub_group_size == 1)
+ {
+ /* Default values. */
+ /* AArch64 branch range is +-128MB. The value used is 1MB less. */
+ stub_group_size = 127 * 1024 * 1024;
+ }
+
+ group_sections (htab, stub_group_size, stubs_always_before_branch);
+
+ while (1)
+ {
+ bfd *input_bfd;
+ unsigned int bfd_indx;
+ asection *stub_sec;
+
+ for (input_bfd = info->input_bfds, bfd_indx = 0;
+ input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
+ {
+ Elf_Internal_Shdr *symtab_hdr;
+ asection *section;
+ Elf_Internal_Sym *local_syms = NULL;
+
+ /* We'll need the symbol table in a second. */
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ if (symtab_hdr->sh_info == 0)
+ continue;
+
+ /* Walk over each section attached to the input bfd. */
+ for (section = input_bfd->sections;
+ section != NULL; section = section->next)
+ {
+ Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
+
+ /* If there aren't any relocs, then there's nothing more
+ to do. */
+ if ((section->flags & SEC_RELOC) == 0
+ || section->reloc_count == 0
+ || (section->flags & SEC_CODE) == 0)
+ continue;
+
+ /* If this section is a link-once section that will be
+ discarded, then don't create any stubs. */
+ if (section->output_section == NULL
+ || section->output_section->owner != output_bfd)
+ continue;
+
+ /* Get the relocs. */
+ internal_relocs
+ = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
+ NULL, info->keep_memory);
+ if (internal_relocs == NULL)
+ goto error_ret_free_local;
+
+ /* Now examine each relocation.
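+
+ (Note, not part of the original patch: only R_AARCH64_CALL26 and
+ R_AARCH64_JUMP26 sites, i.e. "bl" and "b", can need a stub. Their
+ imm26 field is scaled by 4 to a signed 28-bit byte offset, so any
+ branch landing further than +/-128MiB from its call point must be
+ redirected through a stub.)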
*/ + irela = internal_relocs; + irelaend = irela + section->reloc_count; + for (; irela < irelaend; irela++) + { + unsigned int r_type, r_indx; + enum elf64_aarch64_stub_type stub_type; + struct elf64_aarch64_stub_hash_entry *stub_entry; + asection *sym_sec; + bfd_vma sym_value; + bfd_vma destination; + struct elf64_aarch64_link_hash_entry *hash; + const char *sym_name; + char *stub_name; + const asection *id_sec; + unsigned char st_type; + bfd_size_type len; + + r_type = ELF64_R_TYPE (irela->r_info); + r_indx = ELF64_R_SYM (irela->r_info); + + if (r_type >= (unsigned int) R_AARCH64_end) + { + bfd_set_error (bfd_error_bad_value); + error_ret_free_internal: + if (elf_section_data (section)->relocs == NULL) + free (internal_relocs); + goto error_ret_free_local; + } + + /* Only look for stubs on unconditional branch and + branch and link instructions. */ + if (r_type != (unsigned int) R_AARCH64_CALL26 + && r_type != (unsigned int) R_AARCH64_JUMP26) + continue; + + /* Now determine the call target, its name, value, + section. */ + sym_sec = NULL; + sym_value = 0; + destination = 0; + hash = NULL; + sym_name = NULL; + if (r_indx < symtab_hdr->sh_info) + { + /* It's a local symbol. */ + Elf_Internal_Sym *sym; + Elf_Internal_Shdr *hdr; + + if (local_syms == NULL) + { + local_syms + = (Elf_Internal_Sym *) symtab_hdr->contents; + if (local_syms == NULL) + local_syms + = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, + symtab_hdr->sh_info, 0, + NULL, NULL, NULL); + if (local_syms == NULL) + goto error_ret_free_internal; + } + + sym = local_syms + r_indx; + hdr = elf_elfsections (input_bfd)[sym->st_shndx]; + sym_sec = hdr->bfd_section; + if (!sym_sec) + /* This is an undefined symbol. It can never + be resolved. */ + continue; + + if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) + sym_value = sym->st_value; + destination = (sym_value + irela->r_addend + + sym_sec->output_offset + + sym_sec->output_section->vma); + st_type = ELF_ST_TYPE (sym->st_info); + sym_name + = bfd_elf_string_from_elf_section (input_bfd, + symtab_hdr->sh_link, + sym->st_name); + } + else + { + int e_indx; + + e_indx = r_indx - symtab_hdr->sh_info; + hash = ((struct elf64_aarch64_link_hash_entry *) + elf_sym_hashes (input_bfd)[e_indx]); + + while (hash->root.root.type == bfd_link_hash_indirect + || hash->root.root.type == bfd_link_hash_warning) + hash = ((struct elf64_aarch64_link_hash_entry *) + hash->root.root.u.i.link); + + if (hash->root.root.type == bfd_link_hash_defined + || hash->root.root.type == bfd_link_hash_defweak) + { + struct elf64_aarch64_link_hash_table *globals = + elf64_aarch64_hash_table (info); + sym_sec = hash->root.root.u.def.section; + sym_value = hash->root.root.u.def.value; + /* For a destination in a shared library, + use the PLT stub as target address to + decide whether a branch stub is + needed. */ + if (globals->root.splt != NULL && hash != NULL + && hash->root.plt.offset != (bfd_vma) - 1) + { + sym_sec = globals->root.splt; + sym_value = hash->root.plt.offset; + if (sym_sec->output_section != NULL) + destination = (sym_value + + sym_sec->output_offset + + + sym_sec->output_section->vma); + } + else if (sym_sec->output_section != NULL) + destination = (sym_value + irela->r_addend + + sym_sec->output_offset + + sym_sec->output_section->vma); + } + else if (hash->root.root.type == bfd_link_hash_undefined + || (hash->root.root.type + == bfd_link_hash_undefweak)) + { + /* For a shared library, use the PLT stub as + target address to decide whether a long + branch stub is needed. 
+ For absolute code, they cannot be handled. */ + struct elf64_aarch64_link_hash_table *globals = + elf64_aarch64_hash_table (info); + + if (globals->root.splt != NULL && hash != NULL + && hash->root.plt.offset != (bfd_vma) - 1) + { + sym_sec = globals->root.splt; + sym_value = hash->root.plt.offset; + if (sym_sec->output_section != NULL) + destination = (sym_value + + sym_sec->output_offset + + + sym_sec->output_section->vma); + } + else + continue; + } + else + { + bfd_set_error (bfd_error_bad_value); + goto error_ret_free_internal; + } + st_type = ELF_ST_TYPE (hash->root.type); + sym_name = hash->root.root.root.string; + } + + /* Determine what (if any) linker stub is needed. */ + stub_type = aarch64_type_of_stub + (info, section, irela, st_type, hash, destination); + if (stub_type == aarch64_stub_none) + continue; + + /* Support for grouping stub sections. */ + id_sec = htab->stub_group[section->id].link_sec; + + /* Get the name of this stub. */ + stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash, + irela); + if (!stub_name) + goto error_ret_free_internal; + + stub_entry = + aarch64_stub_hash_lookup (&htab->stub_hash_table, + stub_name, FALSE, FALSE); + if (stub_entry != NULL) + { + /* The proper stub has already been created. */ + free (stub_name); + continue; + } + + stub_entry = elf64_aarch64_add_stub (stub_name, section, + htab); + if (stub_entry == NULL) + { + free (stub_name); + goto error_ret_free_internal; + } + + stub_entry->target_value = sym_value; + stub_entry->target_section = sym_sec; + stub_entry->stub_type = stub_type; + stub_entry->h = hash; + stub_entry->st_type = st_type; + + if (sym_name == NULL) + sym_name = "unnamed"; + len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name); + stub_entry->output_name = bfd_alloc (htab->stub_bfd, len); + if (stub_entry->output_name == NULL) + { + free (stub_name); + goto error_ret_free_internal; + } + + snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME, + sym_name); + + stub_changed = TRUE; + } + + /* We're done with the internal relocs, free them. */ + if (elf_section_data (section)->relocs == NULL) + free (internal_relocs); + } + } + + if (!stub_changed) + break; + + /* OK, we've added some stubs. Find out the new size of the + stub sections. */ + for (stub_sec = htab->stub_bfd->sections; + stub_sec != NULL; stub_sec = stub_sec->next) + stub_sec->size = 0; + + bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab); + + /* Ask the linker to do its stuff. */ + (*htab->layout_sections_again) (); + stub_changed = FALSE; + } + + return TRUE; + +error_ret_free_local: + return FALSE; +} + +/* Build all the stubs associated with the current output file. The + stubs are kept in a hash table attached to the main linker hash + table. We also set up the .plt entries for statically linked PIC + functions here. This function is called via aarch64_elf_finish in the + linker. */ + +bfd_boolean +elf64_aarch64_build_stubs (struct bfd_link_info *info) +{ + asection *stub_sec; + struct bfd_hash_table *table; + struct elf64_aarch64_link_hash_table *htab; + + htab = elf64_aarch64_hash_table (info); + + for (stub_sec = htab->stub_bfd->sections; + stub_sec != NULL; stub_sec = stub_sec->next) + { + bfd_size_type size; + + /* Ignore non-stub sections. */ + if (!strstr (stub_sec->name, STUB_SUFFIX)) + continue; + + /* Allocate memory to hold the linker stubs. 
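+
+ (Note, not part of the original patch: stub_sec->size was computed
+ by aarch64_size_one_stub during sizing; it is zeroed again just
+ below because aarch64_build_one_stub re-accumulates it, using the
+ running size as each stub's stub_offset while the instructions are
+ emitted.)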
*/ + size = stub_sec->size; + stub_sec->contents = bfd_zalloc (htab->stub_bfd, size); + if (stub_sec->contents == NULL && size != 0) + return FALSE; + stub_sec->size = 0; + } + + /* Build the stubs as directed by the stub hash table. */ + table = &htab->stub_hash_table; + bfd_hash_traverse (table, aarch64_build_one_stub, info); + + return TRUE; +} + + +/* Add an entry to the code/data map for section SEC. */ + +static void +elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma) +{ + struct _aarch64_elf_section_data *sec_data = + elf64_aarch64_section_data (sec); + unsigned int newidx; + + if (sec_data->map == NULL) + { + sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map)); + sec_data->mapcount = 0; + sec_data->mapsize = 1; + } + + newidx = sec_data->mapcount++; + + if (sec_data->mapcount > sec_data->mapsize) + { + sec_data->mapsize *= 2; + sec_data->map = bfd_realloc_or_free + (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map)); + } + + if (sec_data->map) + { + sec_data->map[newidx].vma = vma; + sec_data->map[newidx].type = type; + } +} + + +/* Initialise maps of insn/data for input BFDs. */ +void +bfd_elf64_aarch64_init_maps (bfd *abfd) +{ + Elf_Internal_Sym *isymbuf; + Elf_Internal_Shdr *hdr; + unsigned int i, localsyms; + + /* Make sure that we are dealing with an AArch64 elf binary. */ + if (!is_aarch64_elf (abfd)) + return; + + if ((abfd->flags & DYNAMIC) != 0) + return; + + hdr = &elf_symtab_hdr (abfd); + localsyms = hdr->sh_info; + + /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field + should contain the number of local symbols, which should come before any + global symbols. Mapping symbols are always local. */ + isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL); + + /* No internal symbols read? Skip this BFD. */ + if (isymbuf == NULL) + return; + + for (i = 0; i < localsyms; i++) + { + Elf_Internal_Sym *isym = &isymbuf[i]; + asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx); + const char *name; + + if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL) + { + name = bfd_elf_string_from_elf_section (abfd, + hdr->sh_link, + isym->st_name); + + if (bfd_is_aarch64_special_symbol_name + (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP)) + elf64_aarch64_section_map_add (sec, name[1], isym->st_value); + } + } +} + +/* Set option values needed during linking. */ +void +bfd_elf64_aarch64_set_options (struct bfd *output_bfd, + struct bfd_link_info *link_info, + int no_enum_warn, + int no_wchar_warn, int pic_veneer) +{ + struct elf64_aarch64_link_hash_table *globals; + + globals = elf64_aarch64_hash_table (link_info); + globals->pic_veneer = pic_veneer; + + BFD_ASSERT (is_aarch64_elf (output_bfd)); + elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; + elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn; +} + +#define MASK(n) ((1u << (n)) - 1) + +/* Decode the 26-bit offset of unconditional branch. */ +static inline uint32_t +decode_branch_ofs_26 (uint32_t insn) +{ + return insn & MASK (26); +} + +/* Decode the 19-bit offset of conditional branch and compare & branch. */ +static inline uint32_t +decode_cond_branch_ofs_19 (uint32_t insn) +{ + return (insn >> 5) & MASK (19); +} + +/* Decode the 19-bit offset of load literal. */ +static inline uint32_t +decode_ld_lit_ofs_19 (uint32_t insn) +{ + return (insn >> 5) & MASK (19); +} + +/* Decode the 14-bit offset of test & branch. 
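+
+ (Worked example, not part of the original patch: in a TBZ/TBNZ
+ encoding imm14 occupies bits [18:5]. For insn 0x36000041, i.e.
+ "tbz w1, #0, .+8", (insn >> 5) & MASK (14) extracts 0x2, which the
+ relocation machinery scales by 4 back to the byte offset 8.)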
*/
+static inline uint32_t
+decode_tst_branch_ofs_14 (uint32_t insn)
+{
+ return (insn >> 5) & MASK (14);
+}
+
+/* Decode the 16-bit imm of move wide. */
+static inline uint32_t
+decode_movw_imm (uint32_t insn)
+{
+ return (insn >> 5) & MASK (16);
+}
+
+/* Decode the 21-bit imm of adr. */
+static inline uint32_t
+decode_adr_imm (uint32_t insn)
+{
+ return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
+}
+
+/* Decode the 12-bit imm of add immediate. */
+static inline uint32_t
+decode_add_imm (uint32_t insn)
+{
+ return (insn >> 10) & MASK (12);
+}
+
+
+/* Encode the 26-bit offset of unconditional branch. */
+static inline uint32_t
+reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~MASK (26)) | (ofs & MASK (26));
+}
+
+/* Encode the 19-bit offset of conditional branch and compare & branch. */
+static inline uint32_t
+reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
+}
+
+/* Encode the 19-bit offset of load literal. */
+static inline uint32_t
+reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
+}
+
+/* Encode the 14-bit offset of test & branch. */
+static inline uint32_t
+reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
+{
+ return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
+}
+
+/* Reencode the imm field of move wide. */
+static inline uint32_t
+reencode_movw_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
+}
+
+/* Reencode the imm field of adr. */
+static inline uint32_t
+reencode_adr_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
+ | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
+}
+
+/* Reencode the imm field of ld/st pos immediate. */
+static inline uint32_t
+reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
+}
+
+/* Reencode the imm field of add immediate. */
+static inline uint32_t
+reencode_add_imm (uint32_t insn, uint32_t imm)
+{
+ return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
+}
+
+/* Reencode mov[zn] to movz. */
+static inline uint32_t
+reencode_movzn_to_movz (uint32_t opcode)
+{
+ return opcode | (1 << 30);
+}
+
+/* Reencode mov[zn] to movn. */
+static inline uint32_t
+reencode_movzn_to_movn (uint32_t opcode)
+{
+ return opcode & ~(1 << 30);
+}
+
+/* Insert the addend/value into the instruction or data object being
+ relocated. */
+static bfd_reloc_status_type
+bfd_elf_aarch64_put_addend (bfd *abfd,
+ bfd_byte *address,
+ reloc_howto_type *howto, bfd_signed_vma addend)
+{
+ bfd_reloc_status_type status = bfd_reloc_ok;
+ bfd_signed_vma old_addend = addend;
+ bfd_vma contents;
+ int size;
+
+ size = bfd_get_reloc_size (howto);
+ switch (size)
+ {
+ case 2:
+ contents = bfd_get_16 (abfd, address);
+ break;
+ case 4:
+ if (howto->src_mask != 0xffffffff)
+ /* Must be 32-bit instruction, always little-endian. */
+ contents = bfd_getl32 (address);
+ else
+ /* Must be 32-bit data (endianness dependent).
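+
+ (Clarifying note, not part of the original patch: A64 instructions
+ are always stored little-endian, even on big-endian data targets,
+ hence the unconditional bfd_getl32 above, while 32-bit data words
+ follow the BFD's configured byte order via bfd_get_32.)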
*/ + contents = bfd_get_32 (abfd, address); + break; + case 8: + contents = bfd_get_64 (abfd, address); + break; + default: + abort (); + } + + switch (howto->complain_on_overflow) + { + case complain_overflow_dont: + break; + case complain_overflow_signed: + status = aarch64_signed_overflow (addend, + howto->bitsize + howto->rightshift); + break; + case complain_overflow_unsigned: + status = aarch64_unsigned_overflow (addend, + howto->bitsize + howto->rightshift); + break; + case complain_overflow_bitfield: + default: + abort (); + } + + addend >>= howto->rightshift; + + switch (howto->type) + { + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + contents = reencode_branch_ofs_26 (contents, addend); + break; + + case R_AARCH64_CONDBR19: + contents = reencode_cond_branch_ofs_19 (contents, addend); + break; + + case R_AARCH64_TSTBR14: + contents = reencode_tst_branch_ofs_14 (contents, addend); + break; + + case R_AARCH64_LD_PREL_LO19: + if (old_addend & ((1 << howto->rightshift) - 1)) + return bfd_reloc_overflow; + contents = reencode_ld_lit_ofs_19 (contents, addend); + break; + + case R_AARCH64_TLSDESC_CALL: + break; + + case R_AARCH64_TLSGD_ADR_PAGE21: + case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: + case R_AARCH64_TLSDESC_ADR_PAGE: + case R_AARCH64_ADR_GOT_PAGE: + case R_AARCH64_ADR_PREL_LO21: + case R_AARCH64_ADR_PREL_PG_HI21: + case R_AARCH64_ADR_PREL_PG_HI21_NC: + contents = reencode_adr_imm (contents, addend); + break; + + case R_AARCH64_TLSGD_ADD_LO12_NC: + case R_AARCH64_TLSLE_ADD_TPREL_LO12: + case R_AARCH64_TLSLE_ADD_TPREL_HI12: + case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: + case R_AARCH64_TLSDESC_ADD_LO12_NC: + case R_AARCH64_ADD_ABS_LO12_NC: + /* Corresponds to: add rd, rn, #uimm12 to provide the low order + 12 bits of the page offset following + R_AARCH64_ADR_PREL_PG_HI21 which computes the + (pc-relative) page base. */ + contents = reencode_add_imm (contents, addend); + break; + + case R_AARCH64_LDST8_ABS_LO12_NC: + case R_AARCH64_LDST16_ABS_LO12_NC: + case R_AARCH64_LDST32_ABS_LO12_NC: + case R_AARCH64_LDST64_ABS_LO12_NC: + case R_AARCH64_LDST128_ABS_LO12_NC: + case R_AARCH64_TLSDESC_LD64_LO12_NC: + case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + case R_AARCH64_LD64_GOT_LO12_NC: + if (old_addend & ((1 << howto->rightshift) - 1)) + return bfd_reloc_overflow; + /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order + 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21 + which computes the (pc-relative) page base. */ + contents = reencode_ldst_pos_imm (contents, addend); + break; + + /* Group relocations to create high bits of a 16, 32, 48 or 64 + bit signed data or abs address inline. Will change + instruction to MOVN or MOVZ depending on sign of calculated + value. */ + + case R_AARCH64_TLSLE_MOVW_TPREL_G2: + case R_AARCH64_TLSLE_MOVW_TPREL_G1: + case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G0: + case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: + case R_AARCH64_MOVW_SABS_G0: + case R_AARCH64_MOVW_SABS_G1: + case R_AARCH64_MOVW_SABS_G2: + /* NOTE: We can only come here with movz or movn. */ + if (addend < 0) + { + /* Force use of MOVN. */ + addend = ~addend; + contents = reencode_movzn_to_movn (contents); + } + else + { + /* Force use of MOVZ. */ + contents = reencode_movzn_to_movz (contents); + } + /* fall through */ + + /* Group relocations to create a 16, 32, 48 or 64 bit unsigned + data or abs address inline. 
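+
+ (Illustrative sequence, not part of the original patch: a 64-bit
+ absolute address is typically built as
+
+ movz x0, #:abs_g3:sym
+ movk x0, #:abs_g2_nc:sym
+ movk x0, #:abs_g1_nc:sym
+ movk x0, #:abs_g0_nc:sym
+
+ where each relocation deposits one 16-bit chunk through
+ reencode_movw_imm above.)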
*/ + + case R_AARCH64_MOVW_UABS_G0: + case R_AARCH64_MOVW_UABS_G0_NC: + case R_AARCH64_MOVW_UABS_G1: + case R_AARCH64_MOVW_UABS_G1_NC: + case R_AARCH64_MOVW_UABS_G2: + case R_AARCH64_MOVW_UABS_G2_NC: + case R_AARCH64_MOVW_UABS_G3: + contents = reencode_movw_imm (contents, addend); + break; + + default: + /* Repack simple data */ + if (howto->dst_mask & (howto->dst_mask + 1)) + return bfd_reloc_notsupported; + + contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask)); + break; + } + + switch (size) + { + case 2: + bfd_put_16 (abfd, contents, address); + break; + case 4: + if (howto->dst_mask != 0xffffffff) + /* must be 32-bit instruction, always little-endian */ + bfd_putl32 (contents, address); + else + /* must be 32-bit data (endianness dependent) */ + bfd_put_32 (abfd, contents, address); + break; + case 8: + bfd_put_64 (abfd, contents, address); + break; + default: + abort (); + } + + return status; +} + +static bfd_vma +aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h, + struct elf64_aarch64_link_hash_table + *globals, struct bfd_link_info *info, + bfd_vma value, bfd *output_bfd, + bfd_boolean *unresolved_reloc_p) +{ + bfd_vma off = (bfd_vma) - 1; + asection *basegot = globals->root.sgot; + bfd_boolean dyn = globals->root.dynamic_sections_created; + + if (h != NULL) + { + off = h->got.offset; + BFD_ASSERT (off != (bfd_vma) - 1); + if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) + || (info->shared + && SYMBOL_REFERENCES_LOCAL (info, h)) + || (ELF_ST_VISIBILITY (h->other) + && h->root.type == bfd_link_hash_undefweak)) + { + /* This is actually a static link, or it is a -Bsymbolic link + and the symbol is defined locally. We must initialize this + entry in the global offset table. Since the offset must + always be a multiple of 8, we use the least significant bit + to record whether we have initialized it already. + When doing a dynamic link, we create a .rel(a).got relocation + entry to initialize the value. This is done in the + finish_dynamic_symbol routine. */ + if ((off & 1) != 0) + off &= ~1; + else + { + bfd_put_64 (output_bfd, value, basegot->contents + off); + h->got.offset |= 1; + } + } + else + *unresolved_reloc_p = FALSE; + + off = off + basegot->output_section->vma + basegot->output_offset; + } + + return off; +} + +/* Change R_TYPE to a more efficient access model where possible, + return the new reloc type. */ + +static unsigned int +aarch64_tls_transition_without_check (unsigned int r_type, + struct elf_link_hash_entry *h) +{ + bfd_boolean is_local = h == NULL; + switch (r_type) + { + case R_AARCH64_TLSGD_ADR_PAGE21: + case R_AARCH64_TLSDESC_ADR_PAGE: + return is_local + ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21; + + case R_AARCH64_TLSGD_ADD_LO12_NC: + case R_AARCH64_TLSDESC_LD64_LO12_NC: + return is_local + ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC + : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC; + + case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: + return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type; + + case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type; + + case R_AARCH64_TLSDESC_ADD_LO12_NC: + case R_AARCH64_TLSDESC_CALL: + /* Instructions with these relocations will become NOPs. 
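+
+ (For illustration, not part of the original patch: in the usual
+ TLS descriptor sequence
+
+ adrp x0, :tlsdesc:var
+ ldr x1, [x0, #:tlsdesc_lo12:var]
+ add x0, x0, #:tlsdesc_lo12:var
+ blr x1
+
+ the ADD and the BLR carry these two relocations, and both
+ instructions are overwritten with NOPs by elf64_aarch64_tls_relax
+ when the access is relaxed.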
*/ + return R_AARCH64_NONE; + } + + return r_type; +} + +static unsigned int +aarch64_reloc_got_type (unsigned int r_type) +{ + switch (r_type) + { + case R_AARCH64_LD64_GOT_LO12_NC: + case R_AARCH64_ADR_GOT_PAGE: + return GOT_NORMAL; + + case R_AARCH64_TLSGD_ADR_PAGE21: + case R_AARCH64_TLSGD_ADD_LO12_NC: + return GOT_TLS_GD; + + case R_AARCH64_TLSDESC_ADD_LO12_NC: + case R_AARCH64_TLSDESC_ADR_PAGE: + case R_AARCH64_TLSDESC_CALL: + case R_AARCH64_TLSDESC_LD64_LO12_NC: + return GOT_TLSDESC_GD; + + case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: + case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + return GOT_TLS_IE; + + case R_AARCH64_TLSLE_ADD_TPREL_HI12: + case R_AARCH64_TLSLE_ADD_TPREL_LO12: + case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G0: + case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G1: + case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G2: + return GOT_UNKNOWN; + } + return GOT_UNKNOWN; +} + +static bfd_boolean +aarch64_can_relax_tls (bfd *input_bfd, + struct bfd_link_info *info, + unsigned int r_type, + struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + unsigned int symbol_got_type; + unsigned int reloc_got_type; + + if (! IS_AARCH64_TLS_RELOC (r_type)) + return FALSE; + + symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx); + reloc_got_type = aarch64_reloc_got_type (r_type); + + if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type)) + return TRUE; + + if (info->shared) + return FALSE; + + if (h && h->root.type == bfd_link_hash_undefweak) + return FALSE; + + return TRUE; +} + +static unsigned int +aarch64_tls_transition (bfd *input_bfd, + struct bfd_link_info *info, + unsigned int r_type, + struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx)) + return r_type; + + return aarch64_tls_transition_without_check (r_type, h); +} + +/* Return the base VMA address which should be subtracted from real addresses + when resolving R_AARCH64_TLS_DTPREL64 relocation. */ + +static bfd_vma +dtpoff_base (struct bfd_link_info *info) +{ + /* If tls_sec is NULL, we should have signalled an error already. */ + BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL); + return elf_hash_table (info)->tls_sec->vma; +} + + +/* Return the base VMA address which should be subtracted from real addresses + when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */ + +static bfd_vma +tpoff_base (struct bfd_link_info *info) +{ + struct elf_link_hash_table *htab = elf_hash_table (info); + + /* If tls_sec is NULL, we should have signalled an error already. */ + if (htab->tls_sec == NULL) + return 0; + + bfd_vma base = align_power ((bfd_vma) TCB_SIZE, + htab->tls_sec->alignment_power); + return htab->tls_sec->vma - base; +} + +static bfd_vma * +symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + /* Calculate the address of the GOT entry for symbol + referred to in h. 
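+
+ (Note, not part of the original patch: the slot returned here holds
+ the GOT offset with its least significant bit reused as an
+ "already initialised" flag; symbol_got_offset_mark sets the bit and
+ symbol_got_offset masks it off again, which is safe because GOT
+ entries are 8-byte aligned.)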
*/ + if (h != NULL) + return &h->got.offset; + else + { + /* local symbol */ + struct elf_aarch64_local_symbol *l; + + l = elf64_aarch64_locals (input_bfd); + return &l[r_symndx].got_offset; + } +} + +static void +symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + bfd_vma *p; + p = symbol_got_offset_ref (input_bfd, h, r_symndx); + *p |= 1; +} + +static int +symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + bfd_vma value; + value = * symbol_got_offset_ref (input_bfd, h, r_symndx); + return value & 1; +} + +static bfd_vma +symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + bfd_vma value; + value = * symbol_got_offset_ref (input_bfd, h, r_symndx); + value &= ~1; + return value; +} + +static bfd_vma * +symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + /* Calculate the address of the GOT entry for symbol + referred to in h. */ + if (h != NULL) + { + struct elf64_aarch64_link_hash_entry *eh; + eh = (struct elf64_aarch64_link_hash_entry *) h; + return &eh->tlsdesc_got_jump_table_offset; + } + else + { + /* local symbol */ + struct elf_aarch64_local_symbol *l; + + l = elf64_aarch64_locals (input_bfd); + return &l[r_symndx].tlsdesc_got_jump_table_offset; + } +} + +static void +symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + bfd_vma *p; + p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx); + *p |= 1; +} + +static int +symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd, + struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + bfd_vma value; + value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx); + return value & 1; +} + +static bfd_vma +symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h, + unsigned long r_symndx) +{ + bfd_vma value; + value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx); + value &= ~1; + return value; +} + +/* Perform a relocation as part of a final link. */ +static bfd_reloc_status_type +elf64_aarch64_final_link_relocate (reloc_howto_type *howto, + bfd *input_bfd, + bfd *output_bfd, + asection *input_section, + bfd_byte *contents, + Elf_Internal_Rela *rel, + bfd_vma value, + struct bfd_link_info *info, + asection *sym_sec, + struct elf_link_hash_entry *h, + bfd_boolean *unresolved_reloc_p, + bfd_boolean save_addend, + bfd_vma *saved_addend) +{ + unsigned int r_type = howto->type; + unsigned long r_symndx; + bfd_byte *hit_data = contents + rel->r_offset; + bfd_vma place; + bfd_signed_vma signed_addend; + struct elf64_aarch64_link_hash_table *globals; + bfd_boolean weak_undef_p; + + globals = elf64_aarch64_hash_table (info); + + BFD_ASSERT (is_aarch64_elf (input_bfd)); + + r_symndx = ELF64_R_SYM (rel->r_info); + + /* It is possible to have linker relaxations on some TLS access + models. Update our information here. */ + r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx); + + if (r_type != howto->type) + howto = elf64_aarch64_howto_from_type (r_type); + + place = input_section->output_section->vma + + input_section->output_offset + rel->r_offset; + + /* Get addend, accumulating the addend for consecutive relocs + which refer to the same offset. */ + signed_addend = saved_addend ? *saved_addend : 0; + signed_addend += rel->r_addend; + + weak_undef_p = (h ? 
h->root.type == bfd_link_hash_undefweak + : bfd_is_und_section (sym_sec)); + switch (r_type) + { + case R_AARCH64_NONE: + case R_AARCH64_NULL: + case R_AARCH64_TLSDESC_CALL: + *unresolved_reloc_p = FALSE; + return bfd_reloc_ok; + + case R_AARCH64_ABS64: + + /* When generating a shared object or relocatable executable, these + relocations are copied into the output file to be resolved at + run time. */ + if (((info->shared == TRUE) || globals->root.is_relocatable_executable) + && (input_section->flags & SEC_ALLOC) + && (h == NULL + || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak)) + { + Elf_Internal_Rela outrel; + bfd_byte *loc; + bfd_boolean skip, relocate; + asection *sreloc; + + *unresolved_reloc_p = FALSE; + + sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, + input_section, 1); + if (sreloc == NULL) + return bfd_reloc_notsupported; + + skip = FALSE; + relocate = FALSE; + + outrel.r_addend = signed_addend; + outrel.r_offset = + _bfd_elf_section_offset (output_bfd, info, input_section, + rel->r_offset); + if (outrel.r_offset == (bfd_vma) - 1) + skip = TRUE; + else if (outrel.r_offset == (bfd_vma) - 2) + { + skip = TRUE; + relocate = TRUE; + } + + outrel.r_offset += (input_section->output_section->vma + + input_section->output_offset); + + if (skip) + memset (&outrel, 0, sizeof outrel); + else if (h != NULL + && h->dynindx != -1 + && (!info->shared || !info->symbolic || !h->def_regular)) + outrel.r_info = ELF64_R_INFO (h->dynindx, r_type); + else + { + int symbol; + + /* On SVR4-ish systems, the dynamic loader cannot + relocate the text and data segments independently, + so the symbol does not matter. */ + symbol = 0; + outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE); + outrel.r_addend += value; + } + + loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab); + bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc); + + if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size) + { + /* Sanity to check that we have previously allocated + sufficient space in the relocation section for the + number of relocations we actually want to emit. */ + abort (); + } + + /* If this reloc is against an external symbol, we do not want to + fiddle with the addend. Otherwise, we need to include the symbol + value so that it becomes an addend for the dynamic reloc. */ + if (!relocate) + return bfd_reloc_ok; + + return _bfd_final_link_relocate (howto, input_bfd, input_section, + contents, rel->r_offset, value, + signed_addend); + } + else + value += signed_addend; + break; + + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + { + asection *splt = globals->root.splt; + bfd_boolean via_plt_p = + splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1; + + /* A call to an undefined weak symbol is converted to a jump to + the next instruction unless a PLT entry will be created. + The jump to the next instruction is optimized as a NOP. + Do the same for local undefined symbols. */ + if (weak_undef_p && ! via_plt_p) + { + bfd_putl32 (INSN_NOP, hit_data); + return bfd_reloc_ok; + } + + /* If the call goes through a PLT entry, make sure to + check distance to the right destination address. */ + if (via_plt_p) + { + value = (splt->output_section->vma + + splt->output_offset + h->plt.offset); + *unresolved_reloc_p = FALSE; + } + + /* If the target symbol is global and marked as a function the + relocation applies a function call or a tail call. In this + situation we can veneer out of range branches. 
The veneers
+ use IP0 and IP1 hence cannot be used for arbitrary out of range
+ branches that occur within the body of a function. */
+ if (h && h->type == STT_FUNC)
+ {
+ /* Check if a stub has to be inserted because the destination
+ is too far away. */
+ if (! aarch64_valid_branch_p (value, place))
+ {
+ /* The target is out of reach, so redirect the branch to
+ the local stub for this function. */
+ struct elf64_aarch64_stub_hash_entry *stub_entry;
+ stub_entry = elf64_aarch64_get_stub_entry (input_section,
+ sym_sec, h,
+ rel, globals);
+ if (stub_entry != NULL)
+ value = (stub_entry->stub_offset
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_sec->output_section->vma);
+ }
+ }
+ }
+ value = aarch64_resolve_relocation (r_type, place, value,
+ signed_addend, weak_undef_p);
+ break;
+
+ case R_AARCH64_ABS16:
+ case R_AARCH64_ABS32:
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ case R_AARCH64_ADR_PREL_LO21:
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ case R_AARCH64_CONDBR19:
+ case R_AARCH64_LD_PREL_LO19:
+ case R_AARCH64_LDST8_ABS_LO12_NC:
+ case R_AARCH64_LDST16_ABS_LO12_NC:
+ case R_AARCH64_LDST32_ABS_LO12_NC:
+ case R_AARCH64_LDST64_ABS_LO12_NC:
+ case R_AARCH64_LDST128_ABS_LO12_NC:
+ case R_AARCH64_MOVW_SABS_G0:
+ case R_AARCH64_MOVW_SABS_G1:
+ case R_AARCH64_MOVW_SABS_G2:
+ case R_AARCH64_MOVW_UABS_G0:
+ case R_AARCH64_MOVW_UABS_G0_NC:
+ case R_AARCH64_MOVW_UABS_G1:
+ case R_AARCH64_MOVW_UABS_G1_NC:
+ case R_AARCH64_MOVW_UABS_G2:
+ case R_AARCH64_MOVW_UABS_G2_NC:
+ case R_AARCH64_MOVW_UABS_G3:
+ case R_AARCH64_PREL16:
+ case R_AARCH64_PREL32:
+ case R_AARCH64_PREL64:
+ case R_AARCH64_TSTBR14:
+ value = aarch64_resolve_relocation (r_type, place, value,
+ signed_addend, weak_undef_p);
+ break;
+
+ case R_AARCH64_LD64_GOT_LO12_NC:
+ case R_AARCH64_ADR_GOT_PAGE:
+ if (globals->root.sgot == NULL)
+ BFD_ASSERT (h != NULL);
+
+ if (h != NULL)
+ {
+ value = aarch64_calculate_got_entry_vma (h, globals, info, value,
+ output_bfd,
+ unresolved_reloc_p);
+ value = aarch64_resolve_relocation (r_type, place, value,
+ 0, weak_undef_p);
+ }
+ break;
+
+ case R_AARCH64_TLSGD_ADR_PAGE21:
+ case R_AARCH64_TLSGD_ADD_LO12_NC:
+ case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+ case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+ if (globals->root.sgot == NULL)
+ return bfd_reloc_notsupported;
+
+ value = (symbol_got_offset (input_bfd, h, r_symndx)
+ + globals->root.sgot->output_section->vma
+ + globals->root.sgot->output_section->output_offset);
+
+ value = aarch64_resolve_relocation (r_type, place, value,
+ 0, weak_undef_p);
+ *unresolved_reloc_p = FALSE;
+ break;
+
+ case R_AARCH64_TLSLE_ADD_TPREL_HI12:
+ case R_AARCH64_TLSLE_ADD_TPREL_LO12:
+ case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+ case R_AARCH64_TLSLE_MOVW_TPREL_G0:
+ case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
+ case R_AARCH64_TLSLE_MOVW_TPREL_G1:
+ case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
+ case R_AARCH64_TLSLE_MOVW_TPREL_G2:
+ value = aarch64_resolve_relocation (r_type, place, value,
+ - tpoff_base (info), weak_undef_p);
+ *unresolved_reloc_p = FALSE;
+ break;
+
+ case R_AARCH64_TLSDESC_ADR_PAGE:
+ case R_AARCH64_TLSDESC_LD64_LO12_NC:
+ case R_AARCH64_TLSDESC_ADD_LO12_NC:
+ case R_AARCH64_TLSDESC_ADD:
+ case R_AARCH64_TLSDESC_LDR:
+ if (globals->root.sgot == NULL)
+ return bfd_reloc_notsupported;
+
+ value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
+ + globals->root.sgotplt->output_section->vma
+ + globals->root.sgotplt->output_section->output_offset
+ + globals->sgotplt_jump_table_size);
+
+ value =
aarch64_resolve_relocation (r_type, place, value, + 0, weak_undef_p); + *unresolved_reloc_p = FALSE; + break; + + default: + return bfd_reloc_notsupported; + } + + if (saved_addend) + *saved_addend = value; + + /* Only apply the final relocation in a sequence. */ + if (save_addend) + return bfd_reloc_continue; + + return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value); +} + +/* Handle TLS relaxations. Relaxing is possible for symbols that use + R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static + link. + + Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller + is to then call final_link_relocate. Return other values in the + case of error. */ + +static bfd_reloc_status_type +elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals, + bfd *input_bfd, bfd_byte *contents, + Elf_Internal_Rela *rel, struct elf_link_hash_entry *h) +{ + bfd_boolean is_local = h == NULL; + unsigned int r_type = ELF64_R_TYPE (rel->r_info); + unsigned long insn; + + BFD_ASSERT (globals && input_bfd && contents && rel); + + switch (r_type) + { + case R_AARCH64_TLSGD_ADR_PAGE21: + case R_AARCH64_TLSDESC_ADR_PAGE: + if (is_local) + { + /* GD->LE relaxation: + adrp x0, :tlsgd:var => movz x0, :tprel_g1:var + or + adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var + */ + bfd_putl32 (0xd2a00000, contents + rel->r_offset); + return bfd_reloc_continue; + } + else + { + /* GD->IE relaxation: + adrp x0, :tlsgd:var => adrp x0, :gottprel:var + or + adrp x0, :tlsdesc:var => adrp x0, :gottprel:var + */ + insn = bfd_getl32 (contents + rel->r_offset); + return bfd_reloc_continue; + } + + case R_AARCH64_TLSDESC_LD64_LO12_NC: + if (is_local) + { + /* GD->LE relaxation: + ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var + */ + bfd_putl32 (0xf2800000, contents + rel->r_offset); + return bfd_reloc_continue; + } + else + { + /* GD->IE relaxation: + ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var] + */ + insn = bfd_getl32 (contents + rel->r_offset); + insn &= 0xfffffff0; + bfd_putl32 (insn, contents + rel->r_offset); + return bfd_reloc_continue; + } + + case R_AARCH64_TLSGD_ADD_LO12_NC: + if (is_local) + { + /* GD->LE relaxation + add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var + bl __tls_get_addr => mrs x1, tpidr_el0 + nop => add x0, x1, x0 + */ + + /* First kill the tls_get_addr reloc on the bl instruction. */ + BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset); + rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE); + + bfd_putl32 (0xf2800000, contents + rel->r_offset); + bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4); + bfd_putl32 (0x8b000020, contents + rel->r_offset + 8); + return bfd_reloc_continue; + } + else + { + /* GD->IE relaxation + ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var] + BL __tls_get_addr => mrs x1, tpidr_el0 + R_AARCH64_CALL26 + NOP => add x0, x1, x0 + */ + + BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26); + + /* Remove the relocation on the BL instruction. */ + rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE); + + bfd_putl32 (0xf9400000, contents + rel->r_offset); + + /* We choose to fixup the BL and NOP instructions using the + offset from the second relocation to allow flexibility in + scheduling instructions between the ADD and BL. 
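+
+ (Encoding note, not part of the original patch: 0xd53bd041 is
+ "mrs x1, tpidr_el0" and 0x8b000020 is "add x0, x1, x0", matching
+ the annotated GD->IE sequence above.)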
*/ + bfd_putl32 (0xd53bd041, contents + rel[1].r_offset); + bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4); + return bfd_reloc_continue; + } + + case R_AARCH64_TLSDESC_ADD_LO12_NC: + case R_AARCH64_TLSDESC_CALL: + /* GD->IE/LE relaxation: + add x0, x0, #:tlsdesc_lo12:var => nop + blr xd => nop + */ + bfd_putl32 (INSN_NOP, contents + rel->r_offset); + return bfd_reloc_ok; + + case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: + /* IE->LE relaxation: + adrp xd, :gottprel:var => movz xd, :tprel_g1:var + */ + if (is_local) + { + insn = bfd_getl32 (contents + rel->r_offset); + bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset); + } + return bfd_reloc_continue; + + case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + /* IE->LE relaxation: + ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var + */ + if (is_local) + { + insn = bfd_getl32 (contents + rel->r_offset); + bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset); + } + return bfd_reloc_continue; + + default: + return bfd_reloc_continue; + } + + return bfd_reloc_ok; +} + +/* Relocate an AArch64 ELF section. */ + +static bfd_boolean +elf64_aarch64_relocate_section (bfd *output_bfd, + struct bfd_link_info *info, + bfd *input_bfd, + asection *input_section, + bfd_byte *contents, + Elf_Internal_Rela *relocs, + Elf_Internal_Sym *local_syms, + asection **local_sections) +{ + Elf_Internal_Shdr *symtab_hdr; + struct elf_link_hash_entry **sym_hashes; + Elf_Internal_Rela *rel; + Elf_Internal_Rela *relend; + const char *name; + struct elf64_aarch64_link_hash_table *globals; + bfd_boolean save_addend = FALSE; + bfd_vma addend = 0; + + globals = elf64_aarch64_hash_table (info); + + symtab_hdr = &elf_symtab_hdr (input_bfd); + sym_hashes = elf_sym_hashes (input_bfd); + + rel = relocs; + relend = relocs + input_section->reloc_count; + for (; rel < relend; rel++) + { + unsigned int r_type; + unsigned int relaxed_r_type; + reloc_howto_type *howto; + unsigned long r_symndx; + Elf_Internal_Sym *sym; + asection *sec; + struct elf_link_hash_entry *h; + bfd_vma relocation; + bfd_reloc_status_type r; + arelent bfd_reloc; + char sym_type; + bfd_boolean unresolved_reloc = FALSE; + char *error_message = NULL; + + r_symndx = ELF64_R_SYM (rel->r_info); + r_type = ELF64_R_TYPE (rel->r_info); + + bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type); + howto = bfd_reloc.howto; + + h = NULL; + sym = NULL; + sec = NULL; + + if (r_symndx < symtab_hdr->sh_info) + { + sym = local_syms + r_symndx; + sym_type = ELF64_ST_TYPE (sym->st_info); + sec = local_sections[r_symndx]; + + /* An object file might have a reference to a local + undefined symbol. This is a daft object file, but we + should at least do something about it. 
*/ + if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL + && bfd_is_und_section (sec) + && ELF_ST_BIND (sym->st_info) != STB_WEAK) + { + if (!info->callbacks->undefined_symbol + (info, bfd_elf_string_from_elf_section + (input_bfd, symtab_hdr->sh_link, sym->st_name), + input_bfd, input_section, rel->r_offset, TRUE)) + return FALSE; + } + + if (r_type >= R_AARCH64_dyn_max) + { + bfd_set_error (bfd_error_bad_value); + return FALSE; + } + + relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel); + } + else + { + bfd_boolean warned; + + RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, + r_symndx, symtab_hdr, sym_hashes, + h, sec, relocation, + unresolved_reloc, warned); + + sym_type = h->type; + } + + if (sec != NULL && discarded_section (sec)) + RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, + rel, 1, relend, howto, 0, contents); + + if (info->relocatable) + { + /* This is a relocatable link. We don't have to change + anything, unless the reloc is against a section symbol, + in which case we have to adjust according to where the + section symbol winds up in the output section. */ + if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION) + rel->r_addend += sec->output_offset; + continue; + } + + if (h != NULL) + name = h->root.root.string; + else + { + name = (bfd_elf_string_from_elf_section + (input_bfd, symtab_hdr->sh_link, sym->st_name)); + if (name == NULL || *name == '\0') + name = bfd_section_name (input_bfd, sec); + } + + if (r_symndx != 0 + && r_type != R_AARCH64_NONE + && r_type != R_AARCH64_NULL + && (h == NULL + || h->root.type == bfd_link_hash_defined + || h->root.type == bfd_link_hash_defweak) + && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS)) + { + (*_bfd_error_handler) + ((sym_type == STT_TLS + ? _("%B(%A+0x%lx): %s used with TLS symbol %s") + : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")), + input_bfd, + input_section, (long) rel->r_offset, howto->name, name); + } + + + /* We relax only if we can see that there can be a valid transition + from a reloc type to another. + We call elf64_aarch64_final_link_relocate unless we're completely + done, i.e., the relaxation produced the final output we want. */ + + relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type, + h, r_symndx); + if (relaxed_r_type != r_type) + { + r_type = relaxed_r_type; + howto = elf64_aarch64_howto_from_type (r_type); + + r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h); + unresolved_reloc = 0; + } + else + r = bfd_reloc_continue; + + /* There may be multiple consecutive relocations for the + same offset. In that case we are supposed to treat the + output of each relocation as the addend for the next. */ + if (rel + 1 < relend + && rel->r_offset == rel[1].r_offset + && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE + && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL) + save_addend = TRUE; + else + save_addend = FALSE; + + if (r == bfd_reloc_continue) + r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd, + input_section, contents, rel, + relocation, info, sec, + h, &unresolved_reloc, + save_addend, &addend); + + switch (r_type) + { + case R_AARCH64_TLSGD_ADR_PAGE21: + case R_AARCH64_TLSGD_ADD_LO12_NC: + if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx)) + { + bfd_boolean need_relocs = FALSE; + bfd_byte *loc; + int indx; + bfd_vma off; + + off = symbol_got_offset (input_bfd, h, r_symndx); + indx = h && h->dynindx != -1 ? 
h->dynindx : 0; + + need_relocs = + (info->shared || indx != 0) && + (h == NULL + || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak); + + BFD_ASSERT (globals->root.srelgot != NULL); + + if (need_relocs) + { + Elf_Internal_Rela rela; + rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64); + rela.r_addend = 0; + rela.r_offset = globals->root.sgot->output_section->vma + + globals->root.sgot->output_offset + off; + + + loc = globals->root.srelgot->contents; + loc += globals->root.srelgot->reloc_count++ + * RELOC_SIZE (htab); + bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); + + if (indx == 0) + { + bfd_put_64 (output_bfd, + relocation - dtpoff_base (info), + globals->root.sgot->contents + off + + GOT_ENTRY_SIZE); + } + else + { + /* This TLS symbol is global. We emit a + relocation to fixup the tls offset at load + time. */ + rela.r_info = + ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64); + rela.r_addend = 0; + rela.r_offset = + (globals->root.sgot->output_section->vma + + globals->root.sgot->output_offset + off + + GOT_ENTRY_SIZE); + + loc = globals->root.srelgot->contents; + loc += globals->root.srelgot->reloc_count++ + * RELOC_SIZE (globals); + bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); + bfd_put_64 (output_bfd, (bfd_vma) 0, + globals->root.sgot->contents + off + + GOT_ENTRY_SIZE); + } + } + else + { + bfd_put_64 (output_bfd, (bfd_vma) 1, + globals->root.sgot->contents + off); + bfd_put_64 (output_bfd, + relocation - dtpoff_base (info), + globals->root.sgot->contents + off + + GOT_ENTRY_SIZE); + } + + symbol_got_offset_mark (input_bfd, h, r_symndx); + } + break; + + case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: + case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx)) + { + bfd_boolean need_relocs = FALSE; + bfd_byte *loc; + int indx; + bfd_vma off; + + off = symbol_got_offset (input_bfd, h, r_symndx); + + indx = h && h->dynindx != -1 ? h->dynindx : 0; + + need_relocs = + (info->shared || indx != 0) && + (h == NULL + || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak); + + BFD_ASSERT (globals->root.srelgot != NULL); + + if (need_relocs) + { + Elf_Internal_Rela rela; + + if (indx == 0) + rela.r_addend = relocation - dtpoff_base (info); + else + rela.r_addend = 0; + + rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64); + rela.r_offset = globals->root.sgot->output_section->vma + + globals->root.sgot->output_offset + off; + + loc = globals->root.srelgot->contents; + loc += globals->root.srelgot->reloc_count++ + * RELOC_SIZE (htab); + + bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); + + bfd_put_64 (output_bfd, rela.r_addend, + globals->root.sgot->contents + off); + } + else + bfd_put_64 (output_bfd, relocation - tpoff_base (info), + globals->root.sgot->contents + off); + + symbol_got_offset_mark (input_bfd, h, r_symndx); + } + break; + + case R_AARCH64_TLSLE_ADD_TPREL_LO12: + case R_AARCH64_TLSLE_ADD_TPREL_HI12: + case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G2: + case R_AARCH64_TLSLE_MOVW_TPREL_G1: + case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G0: + case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: + break; + + case R_AARCH64_TLSDESC_ADR_PAGE: + case R_AARCH64_TLSDESC_LD64_LO12_NC: + case R_AARCH64_TLSDESC_ADD_LO12_NC: + if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx)) + { + bfd_boolean need_relocs = FALSE; + int indx = h && h->dynindx != -1 ? 
h->dynindx : 0; + bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx); + + need_relocs = (h == NULL + || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak); + + BFD_ASSERT (globals->root.srelgot != NULL); + BFD_ASSERT (globals->root.sgot != NULL); + + if (need_relocs) + { + bfd_byte *loc; + Elf_Internal_Rela rela; + rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC); + rela.r_addend = 0; + rela.r_offset = (globals->root.sgotplt->output_section->vma + + globals->root.sgotplt->output_offset + + off + globals->sgotplt_jump_table_size); + + if (indx == 0) + rela.r_addend = relocation - dtpoff_base (info); + + /* Allocate the next available slot in the PLT reloc + section to hold our R_AARCH64_TLSDESC; the next + available slot is determined from reloc_count, + which we step. But note, reloc_count was + artificially moved down while allocating slots for + real PLT relocs such that all of the PLT relocs + will fit above the initial reloc_count and the + extra stuff will fit below. */ + loc = globals->root.srelplt->contents; + loc += globals->root.srelplt->reloc_count++ + * RELOC_SIZE (globals); + + bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); + + bfd_put_64 (output_bfd, (bfd_vma) 0, + globals->root.sgotplt->contents + off + + globals->sgotplt_jump_table_size); + bfd_put_64 (output_bfd, (bfd_vma) 0, + globals->root.sgotplt->contents + off + + globals->sgotplt_jump_table_size + + GOT_ENTRY_SIZE); + } + + symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx); + } + break; + } + + if (!save_addend) + addend = 0; + + + /* Dynamic relocs are not propagated for SEC_DEBUGGING sections + because such sections are not SEC_ALLOC and thus ld.so will + not process them. */ + if (unresolved_reloc + && !((input_section->flags & SEC_DEBUGGING) != 0 + && h->def_dynamic) + && _bfd_elf_section_offset (output_bfd, info, input_section, + rel->r_offset) != (bfd_vma) - 1) + { + (*_bfd_error_handler) + (_ + ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"), + input_bfd, input_section, (long) rel->r_offset, howto->name, + h->root.root.string); + return FALSE; + } + + if (r != bfd_reloc_ok && r != bfd_reloc_continue) + { + switch (r) + { + case bfd_reloc_overflow: + /* If the overflowing reloc was to an undefined symbol, + we have already printed one error message and there + is no point complaining again. */ + if ((!h || + h->root.type != bfd_link_hash_undefined) + && (!((*info->callbacks->reloc_overflow) + (info, (h ? &h->root : NULL), name, howto->name, + (bfd_vma) 0, input_bfd, input_section, + rel->r_offset)))) + return FALSE; + break; + + case bfd_reloc_undefined: + if (!((*info->callbacks->undefined_symbol) + (info, name, input_bfd, input_section, + rel->r_offset, TRUE))) + return FALSE; + break; + + case bfd_reloc_outofrange: + error_message = _("out of range"); + goto common_error; + + case bfd_reloc_notsupported: + error_message = _("unsupported relocation"); + goto common_error; + + case bfd_reloc_dangerous: + /* error_message should already be set. */ + goto common_error; + + default: + error_message = _("unknown error"); + /* Fall through. */ + + common_error: + BFD_ASSERT (error_message != NULL); + if (!((*info->callbacks->reloc_dangerous) + (info, error_message, input_bfd, input_section, + rel->r_offset))) + return FALSE; + break; + } + } + } + + return TRUE; +} + +/* Set the right machine number. 
*/ + +static bfd_boolean +elf64_aarch64_object_p (bfd *abfd) +{ + bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64); + return TRUE; +} + +/* Function to keep AArch64 specific flags in the ELF header. */ + +static bfd_boolean +elf64_aarch64_set_private_flags (bfd *abfd, flagword flags) +{ + if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags) + { + } + else + { + elf_elfheader (abfd)->e_flags = flags; + elf_flags_init (abfd) = TRUE; + } + + return TRUE; +} + +/* Copy backend specific data from one object module to another. */ + +static bfd_boolean +elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd) +{ + flagword in_flags; + + if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd)) + return TRUE; + + in_flags = elf_elfheader (ibfd)->e_flags; + + elf_elfheader (obfd)->e_flags = in_flags; + elf_flags_init (obfd) = TRUE; + + /* Also copy the EI_OSABI field. */ + elf_elfheader (obfd)->e_ident[EI_OSABI] = + elf_elfheader (ibfd)->e_ident[EI_OSABI]; + + /* Copy object attributes. */ + _bfd_elf_copy_obj_attributes (ibfd, obfd); + + return TRUE; +} + +/* Merge backend specific data from an object file to the output + object file when linking. */ + +static bfd_boolean +elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd) +{ + flagword out_flags; + flagword in_flags; + bfd_boolean flags_compatible = TRUE; + asection *sec; + + /* Check if we have the same endianness. */ + if (!_bfd_generic_verify_endian_match (ibfd, obfd)) + return FALSE; + + if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd)) + return TRUE; + + /* The input BFD must have had its flags initialised. */ + /* The following seems bogus to me -- The flags are initialized in + the assembler but I don't think an elf_flags_init field is + written into the object. */ + /* BFD_ASSERT (elf_flags_init (ibfd)); */ + + in_flags = elf_elfheader (ibfd)->e_flags; + out_flags = elf_elfheader (obfd)->e_flags; + + if (!elf_flags_init (obfd)) + { + /* If the input is the default architecture and had the default + flags then do not bother setting the flags for the output + architecture, instead allow future merges to do this. If no + future merges ever set these flags then they will retain their + uninitialised values, which surprise surprise, correspond + to the default values. */ + if (bfd_get_arch_info (ibfd)->the_default + && elf_elfheader (ibfd)->e_flags == 0) + return TRUE; + + elf_flags_init (obfd) = TRUE; + elf_elfheader (obfd)->e_flags = in_flags; + + if (bfd_get_arch (obfd) == bfd_get_arch (ibfd) + && bfd_get_arch_info (obfd)->the_default) + return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), + bfd_get_mach (ibfd)); + + return TRUE; + } + + /* Identical flags must be compatible. */ + if (in_flags == out_flags) + return TRUE; + + /* Check to see if the input BFD actually contains any sections. If + not, its flags may not have been initialised either, but it + cannot actually cause any incompatibility. Do not short-circuit + dynamic objects; their section list may be emptied by + elf_link_add_object_symbols. + + Also check to see if there are no code sections in the input. + In this case there is no need to check for code specific flags. + XXX - do we need to worry about floating-point format compatibility + in data sections ? 
*/ + if (!(ibfd->flags & DYNAMIC)) + { + bfd_boolean null_input_bfd = TRUE; + bfd_boolean only_data_sections = TRUE; + + for (sec = ibfd->sections; sec != NULL; sec = sec->next) + { + if ((bfd_get_section_flags (ibfd, sec) + & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) + == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) + only_data_sections = FALSE; + + null_input_bfd = FALSE; + break; + } + + if (null_input_bfd || only_data_sections) + return TRUE; + } + + return flags_compatible; +} + +/* Display the flags field. */ + +static bfd_boolean +elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr) +{ + FILE *file = (FILE *) ptr; + unsigned long flags; + + BFD_ASSERT (abfd != NULL && ptr != NULL); + + /* Print normal ELF private data. */ + _bfd_elf_print_private_bfd_data (abfd, ptr); + + flags = elf_elfheader (abfd)->e_flags; + /* Ignore init flag - it may not be set, despite the flags field + containing valid data. */ + + /* xgettext:c-format */ + fprintf (file, _("private flags = %lx:"), flags); + + /* No AArch64 specific e_flags bits are currently defined, so there + is nothing further to decode here. */ + + fputc ('\n', file); + + return TRUE; +} + +/* Update the got entry reference counts for the section being removed. */ + +static bfd_boolean +elf64_aarch64_gc_sweep_hook (bfd *abfd ATTRIBUTE_UNUSED, + struct bfd_link_info *info ATTRIBUTE_UNUSED, + asection *sec ATTRIBUTE_UNUSED, + const Elf_Internal_Rela * + relocs ATTRIBUTE_UNUSED) +{ + return TRUE; +} + +/* Adjust a symbol defined by a dynamic object and referenced by a + regular object. The current definition is in some section of the + dynamic object, but we're not including those sections. We have to + change the definition to something the rest of the link can + understand. */ + +static bfd_boolean +elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info, + struct elf_link_hash_entry *h) +{ + struct elf64_aarch64_link_hash_table *htab; + asection *s; + + /* If this is a function, put it in the procedure linkage table. We + will fill in the contents of the procedure linkage table later, + when we know the address of the .got section. */ + if (h->type == STT_FUNC || h->needs_plt) + { + if (h->plt.refcount <= 0 + || SYMBOL_CALLS_LOCAL (info, h) + || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT + && h->root.type == bfd_link_hash_undefweak)) + { + /* This case can occur if we saw a CALL26 reloc in + an input file, but the symbol wasn't referred to + by a dynamic object or all references were + garbage collected. In which case we can end up + resolving the branch directly, without a PLT entry. */ + h->plt.offset = (bfd_vma) - 1; + h->needs_plt = 0; + } + + return TRUE; + } + else + /* It's possible that we incorrectly decided a .plt reloc was + needed for an R_AARCH64_ADR_PREL_PG_HI21 reloc to a non-function + sym in check_relocs. We can't decide accurately between function + and non-function syms in check_relocs; objects loaded later in + the link may change h->type. So fix it now. */ + h->plt.offset = (bfd_vma) - 1; + + + /* If this is a weak symbol, and there is a real definition, the + processor independent code will have arranged for us to see the + real definition first, and we can just use the same value. 
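A typical case (named purely for illustration) is a C library that exports a weak alias such as sigaction for a strong definition __sigaction: h->u.weakdef on the weak symbol points at the strong symbol's entry, so we simply share its section and value below. 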
*/ + if (h->u.weakdef != NULL) + { + BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined + || h->u.weakdef->root.type == bfd_link_hash_defweak); + h->root.u.def.section = h->u.weakdef->root.u.def.section; + h->root.u.def.value = h->u.weakdef->root.u.def.value; + if (ELIMINATE_COPY_RELOCS || info->nocopyreloc) + h->non_got_ref = h->u.weakdef->non_got_ref; + return TRUE; + } + + /* If we are creating a shared library, we must presume that the + only references to the symbol are via the global offset table. + For such cases we need not do anything here; the relocations will + be handled correctly by relocate_section. */ + if (info->shared) + return TRUE; + + /* If there are no references to this symbol that do not use the + GOT, we don't need to generate a copy reloc. */ + if (!h->non_got_ref) + return TRUE; + + /* If -z nocopyreloc was given, we won't generate them either. */ + if (info->nocopyreloc) + { + h->non_got_ref = 0; + return TRUE; + } + + /* We must allocate the symbol in our .dynbss section, which will + become part of the .bss section of the executable. There will be + an entry for this symbol in the .dynsym section. The dynamic + object will contain position independent code, so all references + from the dynamic object to this symbol will go through the global + offset table. The dynamic linker will use the .dynsym entry to + determine the address it must put in the global offset table, so + both the dynamic object and the regular object will refer to the + same memory location for the variable. */ + + htab = elf64_aarch64_hash_table (info); + + /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker + to copy the initial value out of the dynamic object and into the + runtime process image. */ + if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) + { + htab->srelbss->size += RELOC_SIZE (htab); + h->needs_copy = 1; + } + + s = htab->sdynbss; + + return _bfd_elf_adjust_dynamic_copy (h, s); + +} + +static bfd_boolean +elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number) +{ + struct elf_aarch64_local_symbol *locals; + locals = elf64_aarch64_locals (abfd); + if (locals == NULL) + { + locals = (struct elf_aarch64_local_symbol *) + bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol)); + if (locals == NULL) + return FALSE; + elf64_aarch64_locals (abfd) = locals; + } + return TRUE; +} + +/* Look through the relocs for a section during the first phase. 
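This pass only gathers bookkeeping: GOT and PLT reference counts and + the TLS got_type of each referenced symbol; no space is allocated + here. Sizing happens later, in elf64_aarch64_allocate_dynrelocs and + elf64_aarch64_size_dynamic_sections below. 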
*/ + +static bfd_boolean +elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info, + asection *sec, const Elf_Internal_Rela *relocs) +{ + Elf_Internal_Shdr *symtab_hdr; + struct elf_link_hash_entry **sym_hashes; + const Elf_Internal_Rela *rel; + const Elf_Internal_Rela *rel_end; + asection *sreloc; + + struct elf64_aarch64_link_hash_table *htab; + + unsigned long nsyms; + + if (info->relocatable) + return TRUE; + + BFD_ASSERT (is_aarch64_elf (abfd)); + + htab = elf64_aarch64_hash_table (info); + sreloc = NULL; + + symtab_hdr = &elf_symtab_hdr (abfd); + sym_hashes = elf_sym_hashes (abfd); + nsyms = NUM_SHDR_ENTRIES (symtab_hdr); + + rel_end = relocs + sec->reloc_count; + for (rel = relocs; rel < rel_end; rel++) + { + struct elf_link_hash_entry *h; + unsigned long r_symndx; + unsigned int r_type; + + r_symndx = ELF64_R_SYM (rel->r_info); + r_type = ELF64_R_TYPE (rel->r_info); + + if (r_symndx >= nsyms + /* PR 9934: It is possible to have relocations that do not + refer to symbols, thus it is also possible to have an + object file containing relocations but no symbol table. */ + && (r_symndx > 0 || nsyms > 0)) + { + (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd, + r_symndx); + return FALSE; + } + + if (nsyms == 0 || r_symndx < symtab_hdr->sh_info) + h = NULL; + else + { + h = sym_hashes[r_symndx - symtab_hdr->sh_info]; + while (h->root.type == bfd_link_hash_indirect + || h->root.type == bfd_link_hash_warning) + h = (struct elf_link_hash_entry *) h->root.u.i.link; + } + + /* Could be done earlier, if h were already available. */ + r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx); + + switch (r_type) + { + case R_AARCH64_ABS64: + + /* We don't need to handle relocs into sections not going into + the "real" output. */ + if ((sec->flags & SEC_ALLOC) == 0) + break; + + if (h != NULL) + { + if (!info->shared) + h->non_got_ref = 1; + + h->plt.refcount += 1; + h->pointer_equality_needed = 1; + } + + /* No need to do anything if we're not creating a shared + object. */ + if (! info->shared) + break; + + { + struct elf_dyn_relocs *p; + struct elf_dyn_relocs **head; + + /* We must copy these reloc types into the output file. + Create a reloc section in dynobj and make room for + this reloc. */ + if (sreloc == NULL) + { + if (htab->root.dynobj == NULL) + htab->root.dynobj = abfd; + + sreloc = _bfd_elf_make_dynamic_reloc_section + (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE); + + if (sreloc == NULL) + return FALSE; + } + + /* If this is a global symbol, we count the number of + relocations we need for this symbol. */ + if (h != NULL) + { + struct elf64_aarch64_link_hash_entry *eh; + eh = (struct elf64_aarch64_link_hash_entry *) h; + head = &eh->dyn_relocs; + } + else + { + /* Track dynamic relocs needed for local syms too. + We really need local syms available to do this + easily. Oh well. */ + + asection *s; + void **vpp; + Elf_Internal_Sym *isym; + + isym = bfd_sym_from_r_symndx (&htab->sym_cache, + abfd, r_symndx); + if (isym == NULL) + return FALSE; + + s = bfd_section_from_elf_index (abfd, isym->st_shndx); + if (s == NULL) + s = sec; + + /* Beware of type punned pointers vs strict aliasing + rules. 
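Casting &elf_section_data (s)->local_dynrel straight to struct + elf_dyn_relocs ** could provoke aliasing warnings, so the code + below goes through an intermediate void **. 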
*/ + vpp = &(elf_section_data (s)->local_dynrel); + head = (struct elf_dyn_relocs **) vpp; + } + + p = *head; + if (p == NULL || p->sec != sec) + { + bfd_size_type amt = sizeof *p; + p = ((struct elf_dyn_relocs *) + bfd_zalloc (htab->root.dynobj, amt)); + if (p == NULL) + return FALSE; + p->next = *head; + *head = p; + p->sec = sec; + } + + p->count += 1; + + } + break; + + /* RR: We probably want to keep a consistency check that + there are no dangling GOT_PAGE relocs. */ + case R_AARCH64_LD64_GOT_LO12_NC: + case R_AARCH64_ADR_GOT_PAGE: + case R_AARCH64_TLSGD_ADR_PAGE21: + case R_AARCH64_TLSGD_ADD_LO12_NC: + case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: + case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: + case R_AARCH64_TLSLE_ADD_TPREL_LO12: + case R_AARCH64_TLSLE_ADD_TPREL_HI12: + case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G2: + case R_AARCH64_TLSLE_MOVW_TPREL_G1: + case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC: + case R_AARCH64_TLSLE_MOVW_TPREL_G0: + case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: + case R_AARCH64_TLSDESC_ADR_PAGE: + case R_AARCH64_TLSDESC_ADD_LO12_NC: + case R_AARCH64_TLSDESC_LD64_LO12_NC: + { + unsigned got_type; + unsigned old_got_type; + + got_type = aarch64_reloc_got_type (r_type); + + if (h) + { + h->got.refcount += 1; + old_got_type = elf64_aarch64_hash_entry (h)->got_type; + } + else + { + struct elf_aarch64_local_symbol *locals; + + if (!elf64_aarch64_allocate_local_symbols + (abfd, symtab_hdr->sh_info)) + return FALSE; + + locals = elf64_aarch64_locals (abfd); + BFD_ASSERT (r_symndx < symtab_hdr->sh_info); + locals[r_symndx].got_refcount += 1; + old_got_type = locals[r_symndx].got_type; + } + + /* If a variable is accessed with both general dynamic TLS + methods, two slots may be created. */ + if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type)) + got_type |= old_got_type; + + /* We will already have issued an error message if there + is a TLS/non-TLS mismatch, based on the symbol type. + So just combine any TLS types needed. */ + if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL + && got_type != GOT_NORMAL) + got_type |= old_got_type; + + /* If the symbol is accessed by both IE and GD methods, we + are able to relax. Turn off the GD flag, without + messing up with any other kind of TLS types that may be + involved. */ + if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) + got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); + + if (old_got_type != got_type) + { + if (h != NULL) + elf64_aarch64_hash_entry (h)->got_type = got_type; + else + { + struct elf_aarch64_local_symbol *locals; + locals = elf64_aarch64_locals (abfd); + BFD_ASSERT (r_symndx < symtab_hdr->sh_info); + locals[r_symndx].got_type = got_type; + } + } + + if (htab->root.sgot == NULL) + { + if (htab->root.dynobj == NULL) + htab->root.dynobj = abfd; + if (!_bfd_elf_create_got_section (htab->root.dynobj, info)) + return FALSE; + } + break; + } + + case R_AARCH64_ADR_PREL_PG_HI21_NC: + case R_AARCH64_ADR_PREL_PG_HI21: + if (h != NULL && info->executable) + { + /* If this reloc is in a read-only section, we might + need a copy reloc. We can't check reliably at this + stage whether the section is read-only, as input + sections have not yet been mapped to output sections. + Tentatively set the flag for now, and correct in + adjust_dynamic_symbol. 
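(See elf64_aarch64_adjust_dynamic_symbol above: it clears non_got_ref + again when -z nocopyreloc is in effect, and resets h->plt.offset for + symbols that turn out not to need a PLT entry.) 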
*/ + h->non_got_ref = 1; + h->plt.refcount += 1; + h->pointer_equality_needed = 1; + } + /* FIXME:: RR need to handle these in shared libraries + and essentially bomb out as these being non-PIC + relocations in shared libraries. */ + break; + + case R_AARCH64_CALL26: + case R_AARCH64_JUMP26: + /* If this is a local symbol then we resolve it + directly without creating a PLT entry. */ + if (h == NULL) + continue; + + h->needs_plt = 1; + h->plt.refcount += 1; + break; + } + } + return TRUE; +} + +/* Treat mapping symbols as special target symbols. */ + +static bfd_boolean +elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED, + asymbol *sym) +{ + return bfd_is_aarch64_special_symbol_name (sym->name, + BFD_AARCH64_SPECIAL_SYM_TYPE_ANY); +} + +/* This is a copy of elf_find_function () from elf.c except that + AArch64 mapping symbols are ignored when looking for function names. */ + +static bfd_boolean +aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED, + asection *section, + asymbol **symbols, + bfd_vma offset, + const char **filename_ptr, + const char **functionname_ptr) +{ + const char *filename = NULL; + asymbol *func = NULL; + bfd_vma low_func = 0; + asymbol **p; + + for (p = symbols; *p != NULL; p++) + { + elf_symbol_type *q; + + q = (elf_symbol_type *) * p; + + switch (ELF_ST_TYPE (q->internal_elf_sym.st_info)) + { + default: + break; + case STT_FILE: + filename = bfd_asymbol_name (&q->symbol); + break; + case STT_FUNC: + case STT_NOTYPE: + /* Skip mapping symbols. */ + if ((q->symbol.flags & BSF_LOCAL) + && (bfd_is_aarch64_special_symbol_name + (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))) + continue; + /* Fall through. */ + if (bfd_get_section (&q->symbol) == section + && q->symbol.value >= low_func && q->symbol.value <= offset) + { + func = (asymbol *) q; + low_func = q->symbol.value; + } + break; + } + } + + if (func == NULL) + return FALSE; + + if (filename_ptr) + *filename_ptr = filename; + if (functionname_ptr) + *functionname_ptr = bfd_asymbol_name (func); + + return TRUE; +} + + +/* Find the nearest line to a particular section and offset, for error + reporting. This code is a duplicate of the code in elf.c, except + that it uses aarch64_elf_find_function. */ + +static bfd_boolean +elf64_aarch64_find_nearest_line (bfd *abfd, + asection *section, + asymbol **symbols, + bfd_vma offset, + const char **filename_ptr, + const char **functionname_ptr, + unsigned int *line_ptr) +{ + bfd_boolean found = FALSE; + + /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64 + toolchain uses it. */ + + if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections, + section, symbols, offset, + filename_ptr, functionname_ptr, + line_ptr, NULL, 0, + &elf_tdata (abfd)->dwarf2_find_line_info)) + { + if (!*functionname_ptr) + aarch64_elf_find_function (abfd, section, symbols, offset, + *filename_ptr ? 
NULL : filename_ptr, + functionname_ptr); + + return TRUE; + } + + if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset, + &found, filename_ptr, + functionname_ptr, line_ptr, + &elf_tdata (abfd)->line_info)) + return FALSE; + + if (found && (*functionname_ptr || *line_ptr)) + return TRUE; + + if (symbols == NULL) + return FALSE; + + if (!aarch64_elf_find_function (abfd, section, symbols, offset, + filename_ptr, functionname_ptr)) + return FALSE; + + *line_ptr = 0; + return TRUE; +} + +static bfd_boolean +elf64_aarch64_find_inliner_info (bfd *abfd, + const char **filename_ptr, + const char **functionname_ptr, + unsigned int *line_ptr) +{ + bfd_boolean found; + found = _bfd_dwarf2_find_inliner_info + (abfd, filename_ptr, + functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info); + return found; +} + + +static void +elf64_aarch64_post_process_headers (bfd *abfd, + struct bfd_link_info *link_info + ATTRIBUTE_UNUSED) +{ + Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */ + + i_ehdrp = elf_elfheader (abfd); + i_ehdrp->e_ident[EI_OSABI] = 0; + i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION; +} + +static enum elf_reloc_type_class +elf64_aarch64_reloc_type_class (const Elf_Internal_Rela *rela) +{ + switch ((int) ELF64_R_TYPE (rela->r_info)) + { + case R_AARCH64_RELATIVE: + return reloc_class_relative; + case R_AARCH64_JUMP_SLOT: + return reloc_class_plt; + case R_AARCH64_COPY: + return reloc_class_copy; + default: + return reloc_class_normal; + } +} + +/* Treat AArch64 note sections as link-once sections, merging + duplicates with identical contents. */ + +static bfd_boolean +elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr) +{ + if (hdr->sh_type == SHT_NOTE) + *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS; + + return TRUE; +} + +/* Handle an AArch64 specific section when reading an object file. This is + called when bfd_section_from_shdr finds a section with an unknown + type. */ + +static bfd_boolean +elf64_aarch64_section_from_shdr (bfd *abfd, + Elf_Internal_Shdr *hdr, + const char *name, int shindex) +{ + /* There ought to be a place to keep ELF backend specific flags, but + at the moment there isn't one. We just keep track of the + sections by their name, instead. Fortunately, the ABI gives + names for all the AArch64 specific sections, so we will probably get + away with this. */ + switch (hdr->sh_type) + { + case SHT_AARCH64_ATTRIBUTES: + break; + + default: + return FALSE; + } + + if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) + return FALSE; + + return TRUE; +} + +/* A structure used to record a list of sections, independently + of the next and prev fields in the asection structure. */ +typedef struct section_list +{ + asection *sec; + struct section_list *next; + struct section_list *prev; +} +section_list; + +/* Unfortunately we need to keep a list of sections for which + an _aarch64_elf_section_data structure has been allocated. This + is because it is possible for functions like elf64_aarch64_write_section + to be called on a section which has had an elf_data_structure + allocated for it (and so the used_by_bfd field is valid) but + for which the AArch64 extended version of this structure - the + _aarch64_elf_section_data structure - has not been allocated. 
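Entries are added by record_section_with_aarch64_elf_section_data + below and removed again from elf64_aarch64_close_and_cleanup and + elf64_aarch64_bfd_free_cached_info. 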
*/ +static section_list *sections_with_aarch64_elf_section_data = NULL; + +static void +record_section_with_aarch64_elf_section_data (asection *sec) +{ + struct section_list *entry; + + entry = bfd_malloc (sizeof (*entry)); + if (entry == NULL) + return; + entry->sec = sec; + entry->next = sections_with_aarch64_elf_section_data; + entry->prev = NULL; + if (entry->next != NULL) + entry->next->prev = entry; + sections_with_aarch64_elf_section_data = entry; +} + +static struct section_list * +find_aarch64_elf_section_entry (asection *sec) +{ + struct section_list *entry; + static struct section_list *last_entry = NULL; + + /* This is a short cut for the typical case where the sections are added + to the sections_with_aarch64_elf_section_data list in forward order and + then looked up here in backwards order. This makes a real difference + to the ld-srec/sec64k.exp linker test. */ + entry = sections_with_aarch64_elf_section_data; + if (last_entry != NULL) + { + if (last_entry->sec == sec) + entry = last_entry; + else if (last_entry->next != NULL && last_entry->next->sec == sec) + entry = last_entry->next; + } + + for (; entry; entry = entry->next) + if (entry->sec == sec) + break; + + if (entry) + /* Record the entry prior to this one - it is the entry we are + most likely to want to locate next time. Also this way if we + have been called from + unrecord_section_with_aarch64_elf_section_data () we will not + be caching a pointer that is about to be freed. */ + last_entry = entry->prev; + + return entry; +} + +static void +unrecord_section_with_aarch64_elf_section_data (asection *sec) +{ + struct section_list *entry; + + entry = find_aarch64_elf_section_entry (sec); + + if (entry) + { + if (entry->prev != NULL) + entry->prev->next = entry->next; + if (entry->next != NULL) + entry->next->prev = entry->prev; + if (entry == sections_with_aarch64_elf_section_data) + sections_with_aarch64_elf_section_data = entry->next; + free (entry); + } +} + + +typedef struct +{ + void *finfo; + struct bfd_link_info *info; + asection *sec; + int sec_shndx; + int (*func) (void *, const char *, Elf_Internal_Sym *, + asection *, struct elf_link_hash_entry *); +} output_arch_syminfo; + +enum map_symbol_type +{ + AARCH64_MAP_INSN, + AARCH64_MAP_DATA +}; + + +/* Output a single mapping symbol. */ + +static bfd_boolean +elf64_aarch64_output_map_sym (output_arch_syminfo *osi, + enum map_symbol_type type, bfd_vma offset) +{ + static const char *names[2] = { "$x", "$d" }; + Elf_Internal_Sym sym; + + sym.st_value = (osi->sec->output_section->vma + + osi->sec->output_offset + offset); + sym.st_size = 0; + sym.st_other = 0; + sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); + sym.st_shndx = osi->sec_shndx; + return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1; +} + + + +/* Output mapping symbols for PLT entries associated with H. */ + +static bfd_boolean +elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf) +{ + output_arch_syminfo *osi = (output_arch_syminfo *) inf; + bfd_vma addr; + + if (h->root.type == bfd_link_hash_indirect) + return TRUE; + + if (h->root.type == bfd_link_hash_warning) + /* When warning symbols are created, they **replace** the "real" + entry in the hash table, thus we never get to see the real + symbol in a hash traversal. So look at it now. 
*/ + h = (struct elf_link_hash_entry *) h->root.u.i.link; + + if (h->plt.offset == (bfd_vma) - 1) + return TRUE; + + addr = h->plt.offset; + if (addr == 32) + { + if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) + return FALSE; + } + return TRUE; +} + + +/* Output a single local symbol for a generated stub. */ + +static bfd_boolean +elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name, + bfd_vma offset, bfd_vma size) +{ + Elf_Internal_Sym sym; + + sym.st_value = (osi->sec->output_section->vma + + osi->sec->output_offset + offset); + sym.st_size = size; + sym.st_other = 0; + sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC); + sym.st_shndx = osi->sec_shndx; + return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1; +} + +static bfd_boolean +aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg) +{ + struct elf64_aarch64_stub_hash_entry *stub_entry; + asection *stub_sec; + bfd_vma addr; + char *stub_name; + output_arch_syminfo *osi; + + /* Massage our args to the form they really have. */ + stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry; + osi = (output_arch_syminfo *) in_arg; + + stub_sec = stub_entry->stub_sec; + + /* Ensure this stub is attached to the current section being + processed. */ + if (stub_sec != osi->sec) + return TRUE; + + addr = (bfd_vma) stub_entry->stub_offset; + + stub_name = stub_entry->output_name; + + switch (stub_entry->stub_type) + { + case aarch64_stub_adrp_branch: + if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr, + sizeof (aarch64_adrp_branch_stub))) + return FALSE; + if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) + return FALSE; + break; + case aarch64_stub_long_branch: + if (!elf64_aarch64_output_stub_sym + (osi, stub_name, addr, sizeof (aarch64_long_branch_stub))) + return FALSE; + if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr)) + return FALSE; + if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16)) + return FALSE; + break; + default: + BFD_FAIL (); + } + + return TRUE; +} + +/* Output mapping symbols for linker generated sections. */ + +static bfd_boolean +elf64_aarch64_output_arch_local_syms (bfd *output_bfd, + struct bfd_link_info *info, + void *finfo, + int (*func) (void *, const char *, + Elf_Internal_Sym *, + asection *, + struct elf_link_hash_entry + *)) +{ + output_arch_syminfo osi; + struct elf64_aarch64_link_hash_table *htab; + + htab = elf64_aarch64_hash_table (info); + + osi.finfo = finfo; + osi.info = info; + osi.func = func; + + /* Long call stubs. */ + if (htab->stub_bfd && htab->stub_bfd->sections) + { + asection *stub_sec; + + for (stub_sec = htab->stub_bfd->sections; + stub_sec != NULL; stub_sec = stub_sec->next) + { + /* Ignore non-stub sections. */ + if (!strstr (stub_sec->name, STUB_SUFFIX)) + continue; + + osi.sec = stub_sec; + + osi.sec_shndx = _bfd_elf_section_from_bfd_section + (output_bfd, osi.sec->output_section); + + bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub, + &osi); + } + } + + /* Finally, output mapping symbols for the PLT. */ + if (!htab->root.splt || htab->root.splt->size == 0) + return TRUE; + + osi.sec_shndx = _bfd_elf_section_from_bfd_section + (output_bfd, htab->root.splt->output_section); + osi.sec = htab->root.splt; + + elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map, + (void *) &osi); + + return TRUE; + +} + +/* Allocate target specific section data. 
*/ + +static bfd_boolean +elf64_aarch64_new_section_hook (bfd *abfd, asection *sec) +{ + if (!sec->used_by_bfd) + { + _aarch64_elf_section_data *sdata; + bfd_size_type amt = sizeof (*sdata); + + sdata = bfd_zalloc (abfd, amt); + if (sdata == NULL) + return FALSE; + sec->used_by_bfd = sdata; + } + + record_section_with_aarch64_elf_section_data (sec); + + return _bfd_elf_new_section_hook (abfd, sec); +} + + +static void +unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED, + asection *sec, + void *ignore ATTRIBUTE_UNUSED) +{ + unrecord_section_with_aarch64_elf_section_data (sec); +} + +static bfd_boolean +elf64_aarch64_close_and_cleanup (bfd *abfd) +{ + if (abfd->sections) + bfd_map_over_sections (abfd, + unrecord_section_via_map_over_sections, NULL); + + return _bfd_elf_close_and_cleanup (abfd); +} + +static bfd_boolean +elf64_aarch64_bfd_free_cached_info (bfd *abfd) +{ + if (abfd->sections) + bfd_map_over_sections (abfd, + unrecord_section_via_map_over_sections, NULL); + + return _bfd_free_cached_info (abfd); +} + +static bfd_boolean +elf64_aarch64_is_function_type (unsigned int type) +{ + return type == STT_FUNC; +} + +/* Create dynamic sections. This is different from the ARM backend in that + the got, plt, gotplt and their relocation sections are all created in the + standard part of the bfd elf backend. */ + +static bfd_boolean +elf64_aarch64_create_dynamic_sections (bfd *dynobj, + struct bfd_link_info *info) +{ + struct elf64_aarch64_link_hash_table *htab; + struct elf_link_hash_entry *h; + + if (!_bfd_elf_create_dynamic_sections (dynobj, info)) + return FALSE; + + htab = elf64_aarch64_hash_table (info); + htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); + if (!info->shared) + htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss"); + + if (!htab->sdynbss || (!info->shared && !htab->srelbss)) + abort (); + + /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the + dynobj's .got section. We don't do this in the linker script + because we don't want to define the symbol if we are not creating + a global offset table. */ + h = _bfd_elf_define_linkage_sym (dynobj, info, + htab->root.sgot, "_GLOBAL_OFFSET_TABLE_"); + elf_hash_table (info)->hgot = h; + if (h == NULL) + return FALSE; + + return TRUE; +} + + +/* Allocate space in .plt, .got and associated reloc sections for + dynamic relocs. */ + +static bfd_boolean +elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf) +{ + struct bfd_link_info *info; + struct elf64_aarch64_link_hash_table *htab; + struct elf64_aarch64_link_hash_entry *eh; + struct elf_dyn_relocs *p; + + /* An example of a bfd_link_hash_indirect symbol is versioned + symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect) + -> __gxx_personality_v0(bfd_link_hash_defined) + + There is no need to process bfd_link_hash_indirect symbols here + because we will also be presented with the concrete instance of + the symbol and elf64_aarch64_copy_indirect_symbol () will have been + called to copy all relevant data from the generic to the concrete + symbol instance. + */ + if (h->root.type == bfd_link_hash_indirect) + return TRUE; + + if (h->root.type == bfd_link_hash_warning) + h = (struct elf_link_hash_entry *) h->root.u.i.link; + + info = (struct bfd_link_info *) inf; + htab = elf64_aarch64_hash_table (info); + + if (htab->root.dynamic_sections_created && h->plt.refcount > 0) + { + /* Make sure this symbol is output as a dynamic symbol. + Undefined weak syms won't yet be marked as dynamic. 
*/ + if (h->dynindx == -1 && !h->forced_local) + { + if (!bfd_elf_link_record_dynamic_symbol (info, h)) + return FALSE; + } + + if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) + { + asection *s = htab->root.splt; + + /* If this is the first .plt entry, make room for the special + first entry. */ + if (s->size == 0) + s->size += htab->plt_header_size; + + h->plt.offset = s->size; + + /* If this symbol is not defined in a regular file, and we are + not generating a shared library, then set the symbol to this + location in the .plt. This is required to make function + pointers compare as equal between the normal executable and + the shared library. */ + if (!info->shared && !h->def_regular) + { + h->root.u.def.section = s; + h->root.u.def.value = h->plt.offset; + } + + /* Make room for this entry. For now we only create the + small model PLT entries. We later need to find a way + of relaxing into these from the large model PLT entries. */ + s->size += PLT_SMALL_ENTRY_SIZE; + + /* We also need to make an entry in the .got.plt section, which + will be placed in the .got section by the linker script. */ + htab->root.sgotplt->size += GOT_ENTRY_SIZE; + + /* We also need to make an entry in the .rela.plt section. */ + htab->root.srelplt->size += RELOC_SIZE (htab); + + /* We need to ensure that all GOT entries that serve the PLT + are consecutive with the special GOT slots [0] [1] and + [2]. Any additional relocations, such as + R_AARCH64_TLSDESC, must be placed after the PLT related + entries. We abuse the reloc_count such that during + sizing we adjust reloc_count to indicate the number of + PLT related reserved entries. In subsequent phases when + filling in the contents of the reloc entries, PLT related + entries are placed by computing their PLT index (0 + .. reloc_count), while other non-PLT relocs are placed + at the slot indicated by reloc_count and reloc_count is + updated. */ + + htab->root.srelplt->reloc_count++; + } + else + { + h->plt.offset = (bfd_vma) - 1; + h->needs_plt = 0; + } + } + else + { + h->plt.offset = (bfd_vma) - 1; + h->needs_plt = 0; + } + + eh = (struct elf64_aarch64_link_hash_entry *) h; + eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1; + + if (h->got.refcount > 0) + { + bfd_boolean dyn; + unsigned got_type = elf64_aarch64_hash_entry (h)->got_type; + + h->got.offset = (bfd_vma) - 1; + + dyn = htab->root.dynamic_sections_created; + + /* Make sure this symbol is output as a dynamic symbol. + Undefined weak syms won't yet be marked as dynamic. 
*/ + if (dyn && h->dynindx == -1 && !h->forced_local) + { + if (!bfd_elf_link_record_dynamic_symbol (info, h)) + return FALSE; + } + + if (got_type == GOT_UNKNOWN) + { + } + else if (got_type == GOT_NORMAL) + { + h->got.offset = htab->root.sgot->size; + htab->root.sgot->size += GOT_ENTRY_SIZE; + if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak) + && (info->shared + || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))) + { + htab->root.srelgot->size += RELOC_SIZE (htab); + } + } + else + { + int indx; + if (got_type & GOT_TLSDESC_GD) + { + eh->tlsdesc_got_jump_table_offset = + (htab->root.sgotplt->size + - aarch64_compute_jump_table_size (htab)); + htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2; + h->got.offset = (bfd_vma) - 2; + } + + if (got_type & GOT_TLS_GD) + { + h->got.offset = htab->root.sgot->size; + htab->root.sgot->size += GOT_ENTRY_SIZE * 2; + } + + if (got_type & GOT_TLS_IE) + { + h->got.offset = htab->root.sgot->size; + htab->root.sgot->size += GOT_ENTRY_SIZE; + } + + indx = h && h->dynindx != -1 ? h->dynindx : 0; + if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT + || h->root.type != bfd_link_hash_undefweak) + && (info->shared + || indx != 0 + || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))) + { + if (got_type & GOT_TLSDESC_GD) + { + htab->root.srelplt->size += RELOC_SIZE (htab); + /* Note reloc_count not incremented here! We have + already adjusted reloc_count for this relocation + type. */ + + /* TLSDESC PLT is now needed, but not yet determined. */ + htab->tlsdesc_plt = (bfd_vma) - 1; + } + + if (got_type & GOT_TLS_GD) + htab->root.srelgot->size += RELOC_SIZE (htab) * 2; + + if (got_type & GOT_TLS_IE) + htab->root.srelgot->size += RELOC_SIZE (htab); + } + } + } + else + { + h->got.offset = (bfd_vma) - 1; + } + + if (eh->dyn_relocs == NULL) + return TRUE; + + /* In the shared -Bsymbolic case, discard space allocated for + dynamic pc-relative relocs against symbols which turn out to be + defined in regular objects. For the normal shared case, discard + space for pc-relative relocs that have become local due to symbol + visibility changes. */ + + if (info->shared) + { + /* Relocs that use pc_count are those that appear on a call + insn, or certain REL relocs that can be generated via assembly. + We want calls to protected symbols to resolve directly to the + function rather than going via the plt. If people want + function pointer comparisons to work as expected then they + should avoid writing weird assembly. */ + if (SYMBOL_CALLS_LOCAL (info, h)) + { + struct elf_dyn_relocs **pp; + + for (pp = &eh->dyn_relocs; (p = *pp) != NULL;) + { + p->count -= p->pc_count; + p->pc_count = 0; + if (p->count == 0) + *pp = p->next; + else + pp = &p->next; + } + } + + /* Also discard relocs on undefined weak syms with non-default + visibility. */ + if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak) + { + if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT) + eh->dyn_relocs = NULL; + + /* Make sure undefined weak symbols are output as a dynamic + symbol in PIEs. */ + else if (h->dynindx == -1 + && !h->forced_local + && !bfd_elf_link_record_dynamic_symbol (info, h)) + return FALSE; + } + + } + else if (ELIMINATE_COPY_RELOCS) + { + /* For the non-shared case, discard space for relocs against + symbols which turn out to need copy relocs or are not + dynamic. 
*/ + + if (!h->non_got_ref + && ((h->def_dynamic + && !h->def_regular) + || (htab->root.dynamic_sections_created + && (h->root.type == bfd_link_hash_undefweak + || h->root.type == bfd_link_hash_undefined)))) + { + /* Make sure this symbol is output as a dynamic symbol. + Undefined weak syms won't yet be marked as dynamic. */ + if (h->dynindx == -1 + && !h->forced_local + && !bfd_elf_link_record_dynamic_symbol (info, h)) + return FALSE; + + /* If that succeeded, we know we'll be keeping all the + relocs. */ + if (h->dynindx != -1) + goto keep; + } + + eh->dyn_relocs = NULL; + + keep:; + } + + /* Finally, allocate space. */ + for (p = eh->dyn_relocs; p != NULL; p = p->next) + { + asection *sreloc; + + sreloc = elf_section_data (p->sec)->sreloc; + + BFD_ASSERT (sreloc != NULL); + + sreloc->size += p->count * RELOC_SIZE (htab); + } + + return TRUE; +} + + + + +/* This is the most important function of all. Innocuously named + though! */ +static bfd_boolean +elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED, + struct bfd_link_info *info) +{ + struct elf64_aarch64_link_hash_table *htab; + bfd *dynobj; + asection *s; + bfd_boolean relocs; + bfd *ibfd; + + htab = elf64_aarch64_hash_table (info); + dynobj = htab->root.dynobj; + + BFD_ASSERT (dynobj != NULL); + + if (htab->root.dynamic_sections_created) + { + if (info->executable) + { + s = bfd_get_linker_section (dynobj, ".interp"); + if (s == NULL) + abort (); + s->size = sizeof ELF_DYNAMIC_INTERPRETER; + s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER; + } + } + + /* Set up .got offsets for local syms, and space for local dynamic + relocs. */ + for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) + { + struct elf_aarch64_local_symbol *locals = NULL; + Elf_Internal_Shdr *symtab_hdr; + asection *srel; + unsigned int i; + + if (!is_aarch64_elf (ibfd)) + continue; + + for (s = ibfd->sections; s != NULL; s = s->next) + { + struct elf_dyn_relocs *p; + + for (p = (struct elf_dyn_relocs *) + (elf_section_data (s)->local_dynrel); p != NULL; p = p->next) + { + if (!bfd_is_abs_section (p->sec) + && bfd_is_abs_section (p->sec->output_section)) + { + /* Input section has been discarded, either because + it is a copy of a linkonce section or due to + linker script /DISCARD/, so we'll be discarding + the relocs too. 
*/ + } + else if (p->count != 0) + { + srel = elf_section_data (p->sec)->sreloc; + srel->size += p->count * RELOC_SIZE (htab); + if ((p->sec->output_section->flags & SEC_READONLY) != 0) + info->flags |= DF_TEXTREL; + } + } + } + + locals = elf64_aarch64_locals (ibfd); + if (!locals) + continue; + + symtab_hdr = &elf_symtab_hdr (ibfd); + srel = htab->root.srelgot; + for (i = 0; i < symtab_hdr->sh_info; i++) + { + locals[i].got_offset = (bfd_vma) - 1; + locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1; + if (locals[i].got_refcount > 0) + { + unsigned got_type = locals[i].got_type; + if (got_type & GOT_TLSDESC_GD) + { + locals[i].tlsdesc_got_jump_table_offset = + (htab->root.sgotplt->size + - aarch64_compute_jump_table_size (htab)); + htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2; + locals[i].got_offset = (bfd_vma) - 2; + } + + if (got_type & GOT_TLS_GD) + { + locals[i].got_offset = htab->root.sgot->size; + htab->root.sgot->size += GOT_ENTRY_SIZE * 2; + } + + if (got_type & GOT_TLS_IE) + { + locals[i].got_offset = htab->root.sgot->size; + htab->root.sgot->size += GOT_ENTRY_SIZE; + } + + if (got_type == GOT_UNKNOWN) + { + } + + if (got_type == GOT_NORMAL) + { + } + + if (info->shared) + { + if (got_type & GOT_TLSDESC_GD) + { + htab->root.srelplt->size += RELOC_SIZE (htab); + /* Note RELOC_COUNT not incremented here! */ + htab->tlsdesc_plt = (bfd_vma) - 1; + } + + if (got_type & GOT_TLS_GD) + htab->root.srelgot->size += RELOC_SIZE (htab) * 2; + + if (got_type & GOT_TLS_IE) + htab->root.srelgot->size += RELOC_SIZE (htab); + } + } + else + { + locals[i].got_refcount = (bfd_vma) - 1; + } + } + } + + + /* Allocate global sym .plt and .got entries, and space for global + sym dynamic relocs. */ + elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs, + info); + + + /* For every jump slot reserved in the sgotplt, reloc_count is + incremented. However, when we reserve space for TLS descriptors, + it's not incremented, so in order to compute the space reserved + for them, it suffices to multiply the reloc count by the jump + slot size. */ + + if (htab->root.srelplt) + htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab); + + if (htab->tlsdesc_plt) + { + if (htab->root.splt->size == 0) + htab->root.splt->size += PLT_ENTRY_SIZE; + + htab->tlsdesc_plt = htab->root.splt->size; + htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE; + + /* If we're not using lazy TLS relocations, don't generate the + GOT entry required. */ + if (!(info->flags & DF_BIND_NOW)) + { + htab->dt_tlsdesc_got = htab->root.sgot->size; + htab->root.sgot->size += GOT_ENTRY_SIZE; + } + } + + /* We now have determined the sizes of the various dynamic sections. + Allocate memory for them. */ + relocs = FALSE; + for (s = dynobj->sections; s != NULL; s = s->next) + { + if ((s->flags & SEC_LINKER_CREATED) == 0) + continue; + + if (s == htab->root.splt + || s == htab->root.sgot + || s == htab->root.sgotplt + || s == htab->root.iplt + || s == htab->root.igotplt || s == htab->sdynbss) + { + /* Strip this section if we don't need it; see the + comment below. */ + } + else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela")) + { + if (s->size != 0 && s != htab->root.srelplt) + relocs = TRUE; + + /* We use the reloc_count field as a counter if we need + to copy relocs into the output file. */ + if (s != htab->root.srelplt) + s->reloc_count = 0; + } + else + { + /* It's not one of our sections, so don't allocate space. 
*/ + continue; + } + + if (s->size == 0) + { + /* If we don't need this section, strip it from the + output file. This is mostly to handle .rela.bss and + .rela.plt. We must create both sections in + create_dynamic_sections, because they must be created + before the linker maps input sections to output + sections. The linker does that before + adjust_dynamic_symbol is called, and it is that + function which decides whether anything needs to go + into these sections. */ + + s->flags |= SEC_EXCLUDE; + continue; + } + + if ((s->flags & SEC_HAS_CONTENTS) == 0) + continue; + + /* Allocate memory for the section contents. We use bfd_zalloc + here in case unused entries are not reclaimed before the + section's contents are written out. This should not happen, + but this way if it does, we get a R_AARCH64_NONE reloc instead + of garbage. */ + s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size); + if (s->contents == NULL) + return FALSE; + } + + if (htab->root.dynamic_sections_created) + { + /* Add some entries to the .dynamic section. We fill in the + values later, in elf64_aarch64_finish_dynamic_sections, but we + must add the entries now so that we get the correct size for + the .dynamic section. The DT_DEBUG entry is filled in by the + dynamic linker and used by the debugger. */ +#define add_dynamic_entry(TAG, VAL) \ + _bfd_elf_add_dynamic_entry (info, TAG, VAL) + + if (info->executable) + { + if (!add_dynamic_entry (DT_DEBUG, 0)) + return FALSE; + } + + if (htab->root.splt->size != 0) + { + if (!add_dynamic_entry (DT_PLTGOT, 0) + || !add_dynamic_entry (DT_PLTRELSZ, 0) + || !add_dynamic_entry (DT_PLTREL, DT_RELA) + || !add_dynamic_entry (DT_JMPREL, 0)) + return FALSE; + + if (htab->tlsdesc_plt + && (!add_dynamic_entry (DT_TLSDESC_PLT, 0) + || !add_dynamic_entry (DT_TLSDESC_GOT, 0))) + return FALSE; + } + + if (relocs) + { + if (!add_dynamic_entry (DT_RELA, 0) + || !add_dynamic_entry (DT_RELASZ, 0) + || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab))) + return FALSE; + + /* If any dynamic relocs apply to a read-only section, + then we need a DT_TEXTREL entry. */ + if ((info->flags & DF_TEXTREL) != 0) + { + if (!add_dynamic_entry (DT_TEXTREL, 0)) + return FALSE; + } + } + } +#undef add_dynamic_entry + + return TRUE; +} + +static inline void +elf64_aarch64_update_plt_entry (bfd *output_bfd, + unsigned int r_type, + bfd_byte *plt_entry, bfd_vma value) +{ + reloc_howto_type *howto; + howto = elf64_aarch64_howto_from_type (r_type); + bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value); +} + +static void +elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h, + struct elf64_aarch64_link_hash_table + *htab, bfd *output_bfd) +{ + bfd_byte *plt_entry; + bfd_vma plt_index; + bfd_vma got_offset; + bfd_vma gotplt_entry_address; + bfd_vma plt_entry_address; + Elf_Internal_Rela rela; + bfd_byte *loc; + + plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size; + + /* The offset of this entry in the GOT is the PLT index plus the + three reserved GOT header entries, scaled by the 8-byte entry + size. */ + got_offset = (plt_index + 3) * GOT_ENTRY_SIZE; + plt_entry = htab->root.splt->contents + h->plt.offset; + plt_entry_address = htab->root.splt->output_section->vma + + htab->root.splt->output_section->output_offset + h->plt.offset; + gotplt_entry_address = htab->root.sgotplt->output_section->vma + + htab->root.sgotplt->output_offset + got_offset;
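[Editor's note: a minimal sketch, not part of the patch, of the address arithmetic used in the PLTn fill-in just below. PG and PG_OFFSET, used throughout this file, select the 4KB page base and the low 12 bits of an address; the sk_* names are illustrative.]

    #include <stdint.h>

    #define SK_PG(x)        ((x) & ~(uint64_t) 0xfff)  /* Page base.  */
    #define SK_PG_OFFSET(x) ((x) & (uint64_t) 0xfff)   /* Low 12 bits.  */

    /* got_offset = (plt_index + 3) * GOT_ENTRY_SIZE: the first three
       .got.plt slots are reserved for the dynamic linker.  */
    static uint64_t
    sk_gotplt_offset (uint64_t plt_index)
    {
      return (plt_index + 3) * 8;
    }

    /* The ADRP immediate is the signed page delta, counted in pages.  */
    static int64_t
    sk_adrp_pages (uint64_t gotplt_entry_address, uint64_t plt_entry_address)
    {
      return ((int64_t) SK_PG (gotplt_entry_address)
              - (int64_t) SK_PG (plt_entry_address)) >> 12;
    }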
+ + /* Copy in the boilerplate for the PLTn entry. */ + memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE); + + /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8. + ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ + elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21, + plt_entry, + PG (gotplt_entry_address) - + PG (plt_entry_address)); + + /* Fill in the lo12 bits for the load from the pltgot. */ + elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC, + plt_entry + 4, + PG_OFFSET (gotplt_entry_address)); + + /* Fill in the lo12 bits for the add from the pltgot entry. */ + elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC, + plt_entry + 8, + PG_OFFSET (gotplt_entry_address)); + + /* All the GOTPLT entries are essentially initialized to PLT0. */ + bfd_put_64 (output_bfd, + (htab->root.splt->output_section->vma + + htab->root.splt->output_offset), + htab->root.sgotplt->contents + got_offset); + + /* Fill in the entry in the .rela.plt section. */ + rela.r_offset = gotplt_entry_address; + rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT); + rela.r_addend = 0; + + /* Compute the relocation entry to use based on the PLT index and do + not adjust reloc_count. The reloc_count has already been adjusted + to account for this entry. */ + loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab); + bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); +} + +/* Size sections even though they're not dynamic. We use this hook to + set up _TLS_MODULE_BASE_, if needed. */ + +static bfd_boolean +elf64_aarch64_always_size_sections (bfd *output_bfd, + struct bfd_link_info *info) +{ + asection *tls_sec; + + if (info->relocatable) + return TRUE; + + tls_sec = elf_hash_table (info)->tls_sec; + + if (tls_sec) + { + struct elf_link_hash_entry *tlsbase; + + tlsbase = elf_link_hash_lookup (elf_hash_table (info), + "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE); + + if (tlsbase) + { + struct bfd_link_hash_entry *h = NULL; + const struct elf_backend_data *bed = + get_elf_backend_data (output_bfd); + + if (!(_bfd_generic_link_add_one_symbol + (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL, + tls_sec, 0, NULL, FALSE, bed->collect, &h))) + return FALSE; + + tlsbase->type = STT_TLS; + tlsbase = (struct elf_link_hash_entry *) h; + tlsbase->def_regular = 1; + tlsbase->other = STV_HIDDEN; + (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE); + } + } + + return TRUE; +} + +/* Finish up dynamic symbol handling. We set the contents of various + dynamic sections here. */ +static bfd_boolean +elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd, + struct bfd_link_info *info, + struct elf_link_hash_entry *h, + Elf_Internal_Sym *sym) +{ + struct elf64_aarch64_link_hash_table *htab; + htab = elf64_aarch64_hash_table (info); + + if (h->plt.offset != (bfd_vma) - 1) + { + /* This symbol has an entry in the procedure linkage table. Set + it up. */ + + if (h->dynindx == -1 + || htab->root.splt == NULL + || htab->root.sgotplt == NULL || htab->root.srelplt == NULL) + abort (); + + elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd); + if (!h->def_regular) + { + /* Mark the symbol as undefined, rather than as defined in + the .plt section. Leave the value alone. This is a clue + for the dynamic linker, to make function pointer + comparisons work between an application and shared + library.
*/ + sym->st_shndx = SHN_UNDEF; + } + } + + if (h->got.offset != (bfd_vma) - 1 + && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL) + { + Elf_Internal_Rela rela; + bfd_byte *loc; + + /* This symbol has an entry in the global offset table. Set it + up. */ + if (htab->root.sgot == NULL || htab->root.srelgot == NULL) + abort (); + + rela.r_offset = (htab->root.sgot->output_section->vma + + htab->root.sgot->output_offset + + (h->got.offset & ~(bfd_vma) 1)); + + if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h)) + { + if (!h->def_regular) + return FALSE; + + BFD_ASSERT ((h->got.offset & 1) != 0); + rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE); + rela.r_addend = (h->root.u.def.value + + h->root.u.def.section->output_section->vma + + h->root.u.def.section->output_offset); + } + else + { + BFD_ASSERT ((h->got.offset & 1) == 0); + bfd_put_64 (output_bfd, (bfd_vma) 0, + htab->root.sgot->contents + h->got.offset); + rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT); + rela.r_addend = 0; + } + + loc = htab->root.srelgot->contents; + loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab); + bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); + } + + if (h->needs_copy) + { + Elf_Internal_Rela rela; + bfd_byte *loc; + + /* This symbol needs a copy reloc. Set it up. */ + + if (h->dynindx == -1 + || (h->root.type != bfd_link_hash_defined + && h->root.type != bfd_link_hash_defweak) + || htab->srelbss == NULL) + abort (); + + rela.r_offset = (h->root.u.def.value + + h->root.u.def.section->output_section->vma + + h->root.u.def.section->output_offset); + rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY); + rela.r_addend = 0; + loc = htab->srelbss->contents; + loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab); + bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); + } + + /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may + be NULL for local symbols. */ + if (sym != NULL + && (strcmp (h->root.root.string, "_DYNAMIC") == 0 + || h == elf_hash_table (info)->hgot)) + sym->st_shndx = SHN_ABS; + + return TRUE; +} + +static void +elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED, + struct elf64_aarch64_link_hash_table + *htab) +{ + /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between + small and large plts and at the minute just generates + the small PLT. */ + + /* PLT0 of the small PLT looks like this - + stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack. + adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT + ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the + // symbol resolver + add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the + // GOTPLT entry for this. + br x17 + */ + bfd_vma plt_got_base; + bfd_vma plt_base; + + + memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry, + PLT_ENTRY_SIZE); + elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = + PLT_ENTRY_SIZE; + + plt_got_base = (htab->root.sgotplt->output_section->vma + + htab->root.sgotplt->output_offset); + + plt_base = htab->root.splt->output_section->vma + + htab->root.splt->output_section->output_offset; + + /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8. 
+ ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */ + elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21, + htab->root.splt->contents + 4, + PG (plt_got_base + 16) - PG (plt_base + 4)); + + elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC, + htab->root.splt->contents + 8, + PG_OFFSET (plt_got_base + 16)); + + elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC, + htab->root.splt->contents + 12, + PG_OFFSET (plt_got_base + 16)); +} + +static bfd_boolean +elf64_aarch64_finish_dynamic_sections (bfd *output_bfd, + struct bfd_link_info *info) +{ + struct elf64_aarch64_link_hash_table *htab; + bfd *dynobj; + asection *sdyn; + + htab = elf64_aarch64_hash_table (info); + dynobj = htab->root.dynobj; + sdyn = bfd_get_linker_section (dynobj, ".dynamic"); + + if (htab->root.dynamic_sections_created) + { + Elf64_External_Dyn *dyncon, *dynconend; + + if (sdyn == NULL || htab->root.sgot == NULL) + abort (); + + dyncon = (Elf64_External_Dyn *) sdyn->contents; + dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size); + for (; dyncon < dynconend; dyncon++) + { + Elf_Internal_Dyn dyn; + asection *s; + + bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn); + + switch (dyn.d_tag) + { + default: + continue; + + case DT_PLTGOT: + s = htab->root.sgotplt; + dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; + break; + + case DT_JMPREL: + dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma; + break; + + case DT_PLTRELSZ: + s = htab->root.srelplt->output_section; + dyn.d_un.d_val = s->size; + break; + + case DT_RELASZ: + /* The procedure linkage table relocs (DT_JMPREL) should + not be included in the overall relocs (DT_RELA). + Therefore, we override the DT_RELASZ entry here to + make it not include the JMPREL relocs. Since the + linker script arranges for .rela.plt to follow all + other relocation sections, we don't have to worry + about changing the DT_RELA entry. */ + if (htab->root.srelplt != NULL) + { + s = htab->root.srelplt->output_section; + dyn.d_un.d_val -= s->size; + } + break; + + case DT_TLSDESC_PLT: + s = htab->root.splt; + dyn.d_un.d_ptr = s->output_section->vma + s->output_offset + + htab->tlsdesc_plt; + break; + + case DT_TLSDESC_GOT: + s = htab->root.sgot; + dyn.d_un.d_ptr = s->output_section->vma + s->output_offset + + htab->dt_tlsdesc_got; + break; + } + + bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon); + } + + } + + /* Fill in the special first entry in the procedure linkage table. 
*/ + if (htab->root.splt && htab->root.splt->size > 0) + { + elf64_aarch64_init_small_plt0_entry (output_bfd, htab); + + elf_section_data (htab->root.splt->output_section)-> + this_hdr.sh_entsize = htab->plt_entry_size; + + + if (htab->tlsdesc_plt) + { + bfd_put_64 (output_bfd, (bfd_vma) 0, + htab->root.sgot->contents + htab->dt_tlsdesc_got); + + memcpy (htab->root.splt->contents + htab->tlsdesc_plt, + elf64_aarch64_tlsdesc_small_plt_entry, + sizeof (elf64_aarch64_tlsdesc_small_plt_entry)); + + { + bfd_vma adrp1_addr = + htab->root.splt->output_section->vma + + htab->root.splt->output_offset + htab->tlsdesc_plt + 4; + + bfd_vma adrp2_addr = + htab->root.splt->output_section->vma + + htab->root.splt->output_offset + htab->tlsdesc_plt + 8; + + bfd_vma got_addr = + htab->root.sgot->output_section->vma + + htab->root.sgot->output_offset; + + bfd_vma pltgot_addr = + htab->root.sgotplt->output_section->vma + + htab->root.sgotplt->output_offset; + + bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got; + bfd_vma opcode; + + /* adrp x2, DT_TLSDESC_GOT */ + opcode = bfd_get_32 (output_bfd, + htab->root.splt->contents + + htab->tlsdesc_plt + 4); + opcode = reencode_adr_imm + (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12); + bfd_put_32 (output_bfd, opcode, + htab->root.splt->contents + htab->tlsdesc_plt + 4); + + /* adrp x3, 0 */ + opcode = bfd_get_32 (output_bfd, + htab->root.splt->contents + + htab->tlsdesc_plt + 8); + opcode = reencode_adr_imm + (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12); + bfd_put_32 (output_bfd, opcode, + htab->root.splt->contents + htab->tlsdesc_plt + 8); + + /* ldr x2, [x2, #0] */ + opcode = bfd_get_32 (output_bfd, + htab->root.splt->contents + + htab->tlsdesc_plt + 12); + opcode = reencode_ldst_pos_imm (opcode, + PG_OFFSET (dt_tlsdesc_got) >> 3); + bfd_put_32 (output_bfd, opcode, + htab->root.splt->contents + htab->tlsdesc_plt + 12); + + /* add x3, x3, 0 */ + opcode = bfd_get_32 (output_bfd, + htab->root.splt->contents + + htab->tlsdesc_plt + 16); + opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr)); + bfd_put_32 (output_bfd, opcode, + htab->root.splt->contents + htab->tlsdesc_plt + 16); + } + } + } + + if (htab->root.sgotplt) + { + if (bfd_is_abs_section (htab->root.sgotplt->output_section)) + { + (*_bfd_error_handler) + (_("discarded output section: `%A'"), htab->root.sgotplt); + return FALSE; + } + + /* Fill in the first three entries in the global offset table. */ + if (htab->root.sgotplt->size > 0) + { + /* Set the first entry in the global offset table to the address of + the dynamic section. */ + if (sdyn == NULL) + bfd_put_64 (output_bfd, (bfd_vma) 0, + htab->root.sgotplt->contents); + else + bfd_put_64 (output_bfd, + sdyn->output_section->vma + sdyn->output_offset, + htab->root.sgotplt->contents); + /* Write GOT[1] and GOT[2], needed for the dynamic linker. */ + bfd_put_64 (output_bfd, + (bfd_vma) 0, + htab->root.sgotplt->contents + GOT_ENTRY_SIZE); + bfd_put_64 (output_bfd, + (bfd_vma) 0, + htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2); + } + + elf_section_data (htab->root.sgotplt->output_section)-> + this_hdr.sh_entsize = GOT_ENTRY_SIZE; + } + + if (htab->root.sgot && htab->root.sgot->size > 0) + elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize + = GOT_ENTRY_SIZE; + + return TRUE; +} + +/* Return address for Ith PLT stub in section PLT, for relocation REL + or (bfd_vma) -1 if it should not be included. 
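[Editor's note: a minimal sketch, not part of the patch, of what a helper in the style of reencode_adr_imm, used in the TLS descriptor PLT fix-up above, has to do. In the A64 encoding an ADRP/ADR immediate is split into immlo (opcode bits 29-30) and immhi (opcode bits 5-23); the sk_ name is illustrative.]

    #include <stdint.h>

    static uint32_t
    sk_reencode_adr_imm (uint32_t opcode, uint32_t imm21)
    {
      const uint32_t mask = (0x3u << 29) | (0x7ffffu << 5);
      uint32_t immlo = imm21 & 0x3;             /* Low 2 bits.  */
      uint32_t immhi = (imm21 >> 2) & 0x7ffff;  /* High 19 bits.  */
      return (opcode & ~mask) | (immlo << 29) | (immhi << 5);
    }

[Patching "adrp x2, DT_TLSDESC_GOT" above would pass (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12 as the immediate, i.e. a page delta.]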
*/ + +static bfd_vma +elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt, + const arelent *rel ATTRIBUTE_UNUSED) +{ + return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE; +} + + +/* We use this so we can override certain functions + (though currently we don't). */ + +const struct elf_size_info elf64_aarch64_size_info = +{ + sizeof (Elf64_External_Ehdr), + sizeof (Elf64_External_Phdr), + sizeof (Elf64_External_Shdr), + sizeof (Elf64_External_Rel), + sizeof (Elf64_External_Rela), + sizeof (Elf64_External_Sym), + sizeof (Elf64_External_Dyn), + sizeof (Elf_External_Note), + 4, /* Hash table entry size. */ + 1, /* Internal relocs per external relocs. */ + 64, /* Arch size. */ + 3, /* Log_file_align. */ + ELFCLASS64, EV_CURRENT, + bfd_elf64_write_out_phdrs, + bfd_elf64_write_shdrs_and_ehdr, + bfd_elf64_checksum_contents, + bfd_elf64_write_relocs, + bfd_elf64_swap_symbol_in, + bfd_elf64_swap_symbol_out, + bfd_elf64_slurp_reloc_table, + bfd_elf64_slurp_symbol_table, + bfd_elf64_swap_dyn_in, + bfd_elf64_swap_dyn_out, + bfd_elf64_swap_reloc_in, + bfd_elf64_swap_reloc_out, + bfd_elf64_swap_reloca_in, + bfd_elf64_swap_reloca_out +}; + +#define ELF_ARCH bfd_arch_aarch64 +#define ELF_MACHINE_CODE EM_AARCH64 +#define ELF_MAXPAGESIZE 0x10000 +#define ELF_MINPAGESIZE 0x1000 +#define ELF_COMMONPAGESIZE 0x1000 + +#define bfd_elf64_close_and_cleanup \ + elf64_aarch64_close_and_cleanup + +#define bfd_elf64_bfd_copy_private_bfd_data \ + elf64_aarch64_copy_private_bfd_data + +#define bfd_elf64_bfd_free_cached_info \ + elf64_aarch64_bfd_free_cached_info + +#define bfd_elf64_bfd_is_target_special_symbol \ + elf64_aarch64_is_target_special_symbol + +#define bfd_elf64_bfd_link_hash_table_create \ + elf64_aarch64_link_hash_table_create + +#define bfd_elf64_bfd_link_hash_table_free \ + elf64_aarch64_hash_table_free + +#define bfd_elf64_bfd_merge_private_bfd_data \ + elf64_aarch64_merge_private_bfd_data + +#define bfd_elf64_bfd_print_private_bfd_data \ + elf64_aarch64_print_private_bfd_data + +#define bfd_elf64_bfd_reloc_type_lookup \ + elf64_aarch64_reloc_type_lookup + +#define bfd_elf64_bfd_reloc_name_lookup \ + elf64_aarch64_reloc_name_lookup + +#define bfd_elf64_bfd_set_private_flags \ + elf64_aarch64_set_private_flags + +#define bfd_elf64_find_inliner_info \ + elf64_aarch64_find_inliner_info + +#define bfd_elf64_find_nearest_line \ + elf64_aarch64_find_nearest_line + +#define bfd_elf64_mkobject \ + elf64_aarch64_mkobject + +#define bfd_elf64_new_section_hook \ + elf64_aarch64_new_section_hook + +#define elf_backend_adjust_dynamic_symbol \ + elf64_aarch64_adjust_dynamic_symbol + +#define elf_backend_always_size_sections \ + elf64_aarch64_always_size_sections + +#define elf_backend_check_relocs \ + elf64_aarch64_check_relocs + +#define elf_backend_copy_indirect_symbol \ + elf64_aarch64_copy_indirect_symbol + +/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts + to them in our hash. 
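[Editor's note: a minimal sketch, not part of the patch, of the layout behind elf64_aarch64_plt_sym_val above, assuming the sizes defined earlier in the patch (a 32-byte PLT0 header followed by 16-byte small PLT entries). Names are illustrative.]

    #include <stdint.h>

    enum { SK_PLT_HEADER_SIZE = 32, SK_PLT_SMALL_ENTRY_SIZE = 16 };

    /* Stub I of the small PLT model lives at vma + 32 + I * 16.  */
    static uint64_t
    sk_plt_stub_address (uint64_t plt_vma, uint64_t i)
    {
      return plt_vma + SK_PLT_HEADER_SIZE + i * SK_PLT_SMALL_ENTRY_SIZE;
    }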
*/ +#define elf_backend_create_dynamic_sections \ + elf64_aarch64_create_dynamic_sections + +#define elf_backend_init_index_section \ + _bfd_elf_init_2_index_sections + +#define elf_backend_is_function_type \ + elf64_aarch64_is_function_type + +#define elf_backend_finish_dynamic_sections \ + elf64_aarch64_finish_dynamic_sections + +#define elf_backend_finish_dynamic_symbol \ + elf64_aarch64_finish_dynamic_symbol + +#define elf_backend_gc_sweep_hook \ + elf64_aarch64_gc_sweep_hook + +#define elf_backend_object_p \ + elf64_aarch64_object_p + +#define elf_backend_output_arch_local_syms \ + elf64_aarch64_output_arch_local_syms + +#define elf_backend_plt_sym_val \ + elf64_aarch64_plt_sym_val + +#define elf_backend_post_process_headers \ + elf64_aarch64_post_process_headers + +#define elf_backend_relocate_section \ + elf64_aarch64_relocate_section + +#define elf_backend_reloc_type_class \ + elf64_aarch64_reloc_type_class + +#define elf_backend_section_flags \ + elf64_aarch64_section_flags + +#define elf_backend_section_from_shdr \ + elf64_aarch64_section_from_shdr + +#define elf_backend_size_dynamic_sections \ + elf64_aarch64_size_dynamic_sections + +#define elf_backend_size_info \ + elf64_aarch64_size_info + +#define elf_backend_can_refcount 1 +#define elf_backend_can_gc_sections 0 +#define elf_backend_plt_readonly 1 +#define elf_backend_want_got_plt 1 +#define elf_backend_want_plt_sym 0 +#define elf_backend_may_use_rel_p 0 +#define elf_backend_may_use_rela_p 1 +#define elf_backend_default_use_rela_p 1 +#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3) + +#undef elf_backend_obj_attrs_section +#define elf_backend_obj_attrs_section ".ARM.attributes" + +#include "elf64-target.h" diff --git a/bfd/libbfd.h b/bfd/libbfd.h index 03b065fd956..88ff9c66850 100644 --- a/bfd/libbfd.h +++ b/bfd/libbfd.h @@ -2420,6 +2420,63 @@ static const char *const bfd_reloc_code_real_names[] = { "@@uninitialized@@", "BFD_RELOC_MICROBLAZE_64_GOTOFF", "BFD_RELOC_MICROBLAZE_32_GOTOFF", "BFD_RELOC_MICROBLAZE_COPY", + "BFD_RELOC_AARCH64_ADD_LO12", + "BFD_RELOC_AARCH64_ADR_GOT_PAGE", + "BFD_RELOC_AARCH64_ADR_HI21_PCREL", + "BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL", + "BFD_RELOC_AARCH64_ADR_LO21_PCREL", + "BFD_RELOC_AARCH64_BRANCH19", + "BFD_RELOC_AARCH64_CALL26", + "BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP", + "BFD_RELOC_AARCH64_JUMP26", + "BFD_RELOC_AARCH64_LD_LO19_PCREL", + "BFD_RELOC_AARCH64_LD64_GOT_LO12_NC", + "BFD_RELOC_AARCH64_LDST_LO12", + "BFD_RELOC_AARCH64_LDST8_LO12", + "BFD_RELOC_AARCH64_LDST16_LO12", + "BFD_RELOC_AARCH64_LDST32_LO12", + "BFD_RELOC_AARCH64_LDST64_LO12", + "BFD_RELOC_AARCH64_LDST128_LO12", + "BFD_RELOC_AARCH64_MOVW_G0", + "BFD_RELOC_AARCH64_MOVW_G0_S", + "BFD_RELOC_AARCH64_MOVW_G0_NC", + "BFD_RELOC_AARCH64_MOVW_G1", + "BFD_RELOC_AARCH64_MOVW_G1_NC", + "BFD_RELOC_AARCH64_MOVW_G1_S", + "BFD_RELOC_AARCH64_MOVW_G2", + "BFD_RELOC_AARCH64_MOVW_G2_NC", + "BFD_RELOC_AARCH64_MOVW_G2_S", + "BFD_RELOC_AARCH64_MOVW_G3", + "BFD_RELOC_AARCH64_TLSDESC", + "BFD_RELOC_AARCH64_TLSDESC_ADD", + "BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC", + "BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE", + "BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21", + "BFD_RELOC_AARCH64_TLSDESC_CALL", + "BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC", + "BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19", + "BFD_RELOC_AARCH64_TLSDESC_LDR", + "BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC", + "BFD_RELOC_AARCH64_TLSDESC_OFF_G1", + "BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC", + "BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21", + "BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", + 
"BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19", + "BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", + "BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", + "BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1", + "BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12", + "BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12", + "BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC", + "BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0", + "BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC", + "BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1", + "BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC", + "BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2", + "BFD_RELOC_AARCH64_TLS_DTPMOD64", + "BFD_RELOC_AARCH64_TLS_DTPREL64", + "BFD_RELOC_AARCH64_TLS_TPREL64", + "BFD_RELOC_AARCH64_TSTBR14", "BFD_RELOC_TILEPRO_COPY", "BFD_RELOC_TILEPRO_GLOB_DAT", "BFD_RELOC_TILEPRO_JMP_SLOT", diff --git a/bfd/reloc.c b/bfd/reloc.c index 19c1f96a275..47d052d1345 100644 --- a/bfd/reloc.c +++ b/bfd/reloc.c @@ -5883,6 +5883,278 @@ ENUMDOC This is used to tell the dynamic linker to copy the value out of the dynamic object into the runtime process image. +ENUM + BFD_RELOC_AARCH64_ADD_LO12 +ENUMDOC + AArch64 ADD immediate instruction, holding bits 0 to 11 of the address. + Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL. +ENUM + BFD_RELOC_AARCH64_ADR_GOT_PAGE +ENUMDOC + Get to the page base of the global offset table entry for a symbol as + part of an ADRP instruction using a 21 bit PC relative value.Used in + conjunction with BFD_RELOC_AARCH64_LD64_GOT_LO12_NC. +ENUM + BFD_RELOC_AARCH64_ADR_HI21_PCREL +ENUMDOC + AArch64 ADRP instruction, with bits 12 to 32 of a pc-relative page + offset, giving a 4KB aligned page base address. +ENUM + BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL +ENUMDOC + AArch64 ADRP instruction, with bits 12 to 32 of a pc-relative page + offset, giving a 4KB aligned page base address, but with no overflow + checking. +ENUM + BFD_RELOC_AARCH64_ADR_LO21_PCREL +ENUMDOC + AArch64 ADR instruction, holding a simple 21 bit pc-relative byte offset. +ENUM + BFD_RELOC_AARCH64_BRANCH19 +ENUMDOC + AArch64 19 bit pc-relative conditional branch and compare & branch. + The lowest two bits must be zero and are not stored in the instruction, + giving a 21 bit signed byte offset. +ENUM + BFD_RELOC_AARCH64_CALL26 +ENUMDOC + AArch64 26 bit pc-relative unconditional branch and link. + The lowest two bits must be zero and are not stored in the instruction, + giving a 28 bit signed byte offset. +ENUM + BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP +ENUMDOC + AArch64 pseudo relocation code to be used internally by the AArch64 + assembler and not (currently) written to any object files. +ENUM + BFD_RELOC_AARCH64_JUMP26 +ENUMDOC + AArch64 26 bit pc-relative unconditional branch. + The lowest two bits must be zero and are not stored in the instruction, + giving a 28 bit signed byte offset. +ENUM + BFD_RELOC_AARCH64_LD_LO19_PCREL +ENUMDOC + AArch64 Load Literal instruction, holding a 19 bit pc-relative word + offset. The lowest two bits must be zero and are not stored in the + instruction, giving a 21 bit signed byte offset. +ENUM + BFD_RELOC_AARCH64_LD64_GOT_LO12_NC +ENUMDOC + Unsigned 12 bit byte offset for 64 bit load/store from the page of + the GOT entry for this symbol. Used in conjunction with + BFD_RELOC_AARCH64_ADR_GOTPAGE. +ENUM + BFD_RELOC_AARCH64_LDST_LO12 +ENUMDOC + AArch64 unspecified load/store instruction, holding bits 0 to 11 of the + address. Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL. +ENUM + BFD_RELOC_AARCH64_LDST8_LO12 +ENUMDOC + AArch64 8-bit load/store instruction, holding bits 0 to 11 of the + address. 
Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL. +ENUM + BFD_RELOC_AARCH64_LDST16_LO12 +ENUMDOC + AArch64 16-bit load/store instruction, holding bits 0 to 11 of the + address. Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL. +ENUM + BFD_RELOC_AARCH64_LDST32_LO12 +ENUMDOC + AArch64 32-bit load/store instruction, holding bits 0 to 11 of the + address. Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL. +ENUM + BFD_RELOC_AARCH64_LDST64_LO12 +ENUMDOC + AArch64 64-bit load/store instruction, holding bits 0 to 11 of the + address. Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL. +ENUM + BFD_RELOC_AARCH64_LDST128_LO12 +ENUMDOC + AArch64 128-bit load/store instruction, holding bits 0 to 11 of the + address. Used in conjunction with BFD_RELOC_AARCH64_ADR_HI21_PCREL. +ENUM + BFD_RELOC_AARCH64_MOVW_G0 +ENUMDOC + AArch64 MOV[NZK] instruction with most significant bits 0 to 15 + of an unsigned address/value. +ENUM + BFD_RELOC_AARCH64_MOVW_G0_S +ENUMDOC + AArch64 MOV[NZ] instruction with most significant bits 0 to 15 + of a signed value. Changes instruction to MOVZ or MOVN depending on the + value's sign. +ENUM + BFD_RELOC_AARCH64_MOVW_G0_NC +ENUMDOC + AArch64 MOV[NZK] instruction with less significant bits 0 to 15 of + an address/value. No overflow checking. +ENUM + BFD_RELOC_AARCH64_MOVW_G1 +ENUMDOC + AArch64 MOV[NZK] instruction with most significant bits 16 to 31 + of an unsigned address/value. +ENUM + BFD_RELOC_AARCH64_MOVW_G1_NC +ENUMDOC + AArch64 MOV[NZK] instruction with less significant bits 16 to 31 + of an address/value. No overflow checking. +ENUM + BFD_RELOC_AARCH64_MOVW_G1_S +ENUMDOC + AArch64 MOV[NZ] instruction with most significant bits 16 to 31 + of a signed value. Changes instruction to MOVZ or MOVN depending on the + value's sign. +ENUM + BFD_RELOC_AARCH64_MOVW_G2 +ENUMDOC + AArch64 MOV[NZK] instruction with most significant bits 32 to 47 + of an unsigned address/value. +ENUM + BFD_RELOC_AARCH64_MOVW_G2_NC +ENUMDOC + AArch64 MOV[NZK] instruction with less significant bits 32 to 47 + of an address/value. No overflow checking. +ENUM + BFD_RELOC_AARCH64_MOVW_G2_S +ENUMDOC + AArch64 MOV[NZ] instruction with most significant bits 32 to 47 + of a signed value. Changes instruction to MOVZ or MOVN depending on the + value's sign. +ENUM + BFD_RELOC_AARCH64_MOVW_G3 +ENUMDOC + AArch64 MOV[NZK] instruction with most significant bits 48 to 63 + of a signed or unsigned address/value. +ENUM + BFD_RELOC_AARCH64_TLSDESC +ENUMDOC + AArch64 TLS relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_ADD +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_CALL +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19 +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_LDR +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSDESC_OFF_G1 +ENUMDOC + AArch64 TLS DESC relocation. +ENUM + BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC +ENUMDOC + Unsigned 12 bit byte offset to global offset table entry for a symbol's + tls_index structure.
Used in conjunction with + BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21. +ENUM + BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 +ENUMDOC + Get to the page base of the global offset table entry for a symbol's + tls_index structure as part of an ADRP instruction using a 21 bit PC + relative value. Used in conjunction with + BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC. +ENUM + BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 +ENUMDOC + AArch64 TLS INITIAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 +ENUMDOC + AArch64 TLS INITIAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC +ENUMDOC + AArch64 TLS INITIAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC +ENUMDOC + AArch64 TLS INITIAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 +ENUMDOC + AArch64 TLS INITIAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 +ENUMDOC + AArch64 TLS LOCAL EXEC relocation. +ENUM + BFD_RELOC_AARCH64_TLS_DTPMOD64 +ENUMDOC + AArch64 TLS relocation. +ENUM + BFD_RELOC_AARCH64_TLS_DTPREL64 +ENUMDOC + AArch64 TLS relocation. +ENUM + BFD_RELOC_AARCH64_TLS_TPREL64 +ENUMDOC + AArch64 TLS relocation. +ENUM + BFD_RELOC_AARCH64_TSTBR14 +ENUMDOC + AArch64 14 bit pc-relative test bit and branch. + The lowest two bits must be zero and are not stored in the instruction, + giving a 16 bit signed byte offset. + ENUM BFD_RELOC_TILEPRO_COPY ENUMX @@ -6043,7 +6315,6 @@ ENUMX BFD_RELOC_TILEPRO_IMM16_X1_TLS_LE_HA ENUMDOC Tilera TILEPro Relocations. - ENUM BFD_RELOC_TILEGX_HW0 ENUMX @@ -6236,7 +6507,6 @@ ENUMX BFD_RELOC_TILEGX_IMM8_Y1_TLS_ADD ENUMDOC Tilera TILE-Gx Relocations.
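[Editor's note: a minimal sketch, not part of the patch, of how the paired relocations documented above materialise a full address. ADR_HI21_PCREL resolves to the page delta between (S + A) and the place P, stored in ADRP as a page count; ADD_LO12 (or an LDSTn_LO12 variant) supplies the remaining 12 bits. Names are illustrative.]

    #include <stdint.h>

    /* Value encoded in the ADRP instruction, in 4KB pages:
       ((PG(S+A) - PG(P)) >> 12).  */
    static int64_t
    sk_adr_hi21_value (uint64_t s, int64_t a, uint64_t p)
    {
      return (int64_t) (((s + a) & ~(uint64_t) 0xfff)
                        - (p & ~(uint64_t) 0xfff)) >> 12;
    }

    /* Value encoded in the following ADD immediate: (S + A) & 0xfff.  */
    static uint64_t
    sk_add_lo12_value (uint64_t s, int64_t a)
    {
      return (s + a) & 0xfff;
    }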
- ENUM BFD_RELOC_EPIPHANY_SIMM8 ENUMDOC diff --git a/bfd/targets.c b/bfd/targets.c index ce1cf350f48..fa206d24bef 100644 --- a/bfd/targets.c +++ b/bfd/targets.c @@ -730,6 +730,7 @@ extern const bfd_target bfd_elf64_alpha_freebsd_vec; extern const bfd_target bfd_elf64_alpha_vec; extern const bfd_target bfd_elf64_big_generic_vec; extern const bfd_target bfd_elf64_bigmips_vec; +extern const bfd_target bfd_elf64_bigaarch64_vec; extern const bfd_target bfd_elf64_hppa_linux_vec; extern const bfd_target bfd_elf64_hppa_vec; extern const bfd_target bfd_elf64_ia64_big_vec; @@ -738,6 +739,7 @@ extern const bfd_target bfd_elf64_ia64_little_vec; extern const bfd_target bfd_elf64_ia64_vms_vec; extern const bfd_target bfd_elf64_little_generic_vec; extern const bfd_target bfd_elf64_littlemips_vec; +extern const bfd_target bfd_elf64_littleaarch64_vec; extern const bfd_target bfd_elf64_mmix_vec; extern const bfd_target bfd_elf64_powerpc_vec; extern const bfd_target bfd_elf64_powerpcle_vec; @@ -1105,6 +1107,7 @@ static const bfd_target * const _bfd_target_vector[] = &bfd_elf64_alpha_vec, &bfd_elf64_big_generic_vec, &bfd_elf64_bigmips_vec, + &bfd_elf64_bigaarch64_vec, &bfd_elf64_hppa_linux_vec, &bfd_elf64_hppa_vec, &bfd_elf64_ia64_big_vec, @@ -1113,6 +1116,7 @@ static const bfd_target * const _bfd_target_vector[] = &bfd_elf64_ia64_vms_vec, &bfd_elf64_little_generic_vec, &bfd_elf64_littlemips_vec, + &bfd_elf64_littleaarch64_vec, &bfd_elf64_mmix_vec, &bfd_elf64_powerpc_vec, &bfd_elf64_powerpcle_vec, diff --git a/binutils/ChangeLog b/binutils/ChangeLog index 04c9b3a58a4..3e8c2da4c7d 100644 --- a/binutils/ChangeLog +++ b/binutils/ChangeLog @@ -1,3 +1,26 @@ +2012-08-13 Ian Bolton + Laurent Desnogues + Jim MacArthur + Marcus Shawcroft + Nigel Stephens + Ramana Radhakrishnan + Richard Earnshaw + Sofiane Naci + Tejas Belagod + Yufeng Zhang + + * readelf.c (guess_is_rela): Handle EM_AARCH64. + (get_machine_name): Likewise. + (get_aarch64_segment_type): New function. + (get_segment_type): Handle EM_AARCH64 by calling the new function. + (get_aarch64_section_type_name): New function. + (get_section_type_name): Handle EM_AARCH64 by calling the new function. + (is_32bit_abs_reloc): Handle EM_AARCH64. + (is_32bit_pcrel_reloc): Likewise. + (is_64bit_abs_reloc): Likewise. + (is_64bit_pcrel_reloc): Likewise. + (is_none_reloc): Likewise. + 2012-08-09 Nick Clifton * po/vi.po: Updated Vietnamese translation. diff --git a/binutils/MAINTAINERS b/binutils/MAINTAINERS index 9850903ecd1..cdb8b84ace3 100644 --- a/binutils/MAINTAINERS +++ b/binutils/MAINTAINERS @@ -57,6 +57,7 @@ maintainer. The first maintainer is free to devolve that responsibility among the other maintainers. 
ALPHA Richard Henderson + AARCH64 Richard Earnshaw ARM Nick Clifton ARM Richard Earnshaw ARM Paul Brook diff --git a/binutils/doc/Makefile.in b/binutils/doc/Makefile.in index e39ee65c888..801af7da521 100644 --- a/binutils/doc/Makefile.in +++ b/binutils/doc/Makefile.in @@ -39,7 +39,7 @@ DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/../bfd/acinclude.m4 \ $(top_srcdir)/../config/zlib.m4 \ - $(top_srcdir)/../bfd/warning.m4 \ + $(top_srcdir)/../bfd/warning.m4 $(top_srcdir)/../config/acx.m4 \ $(top_srcdir)/../config/depstand.m4 \ $(top_srcdir)/../config/gettext-sister.m4 \ $(top_srcdir)/../config/iconv.m4 \ diff --git a/binutils/readelf.c b/binutils/readelf.c index e6f2be6b0d0..5423c7fa25f 100644 --- a/binutils/readelf.c +++ b/binutils/readelf.c @@ -91,6 +91,7 @@ #define RELOC_MACROS_GEN_FUNC +#include "elf/aarch64.h" #include "elf/alpha.h" #include "elf/arc.h" #include "elf/arm.h" @@ -551,6 +552,7 @@ guess_is_rela (unsigned int e_machine) /* Targets that use RELA relocations. */ case EM_68K: case EM_860: + case EM_AARCH64: case EM_ADAPTEVA_EPIPHANY: case EM_ALPHA: case EM_ALTERA_NIOS2: @@ -983,6 +985,10 @@ dump_relocations (FILE * file, rtype = NULL; break; + case EM_AARCH64: + rtype = elf_aarch64_reloc_type (type); + break; + case EM_M32R: case EM_CYGNUS_M32R: rtype = elf_m32r_reloc_type (type); @@ -1830,6 +1836,7 @@ get_machine_name (unsigned e_machine) switch (e_machine) { case EM_NONE: return _("None"); + case EM_AARCH64: return "AArch64"; case EM_M32: return "WE32100"; case EM_SPARC: return "Sparc"; case EM_SPU: return "SPU"; @@ -2694,6 +2701,20 @@ get_osabi_name (unsigned int osabi) } } +static const char * +get_aarch64_segment_type (unsigned long type) +{ + switch (type) + { + case PT_AARCH64_ARCHEXT: + return "AARCH64_ARCHEXT"; + default: + break; + } + + return NULL; +} + static const char * get_arm_segment_type (unsigned long type) { @@ -2816,6 +2837,9 @@ get_segment_type (unsigned long p_type) switch (elf_header.e_machine) { + case EM_AARCH64: + result = get_aarch64_segment_type (p_type); + break; case EM_ARM: result = get_arm_segment_type (p_type); break; @@ -2976,6 +3000,19 @@ get_x86_64_section_type_name (unsigned int sh_type) return NULL; } +static const char * +get_aarch64_section_type_name (unsigned int sh_type) +{ + switch (sh_type) + { + case SHT_AARCH64_ATTRIBUTES: + return "AARCH64_ATTRIBUTES"; + default: + break; + } + return NULL; +} + static const char * get_arm_section_type_name (unsigned int sh_type) { @@ -3075,6 +3112,9 @@ get_section_type_name (unsigned int sh_type) case EM_K1OM: result = get_x86_64_section_type_name (sh_type); break; + case EM_AARCH64: + result = get_aarch64_section_type_name (sh_type); + break; case EM_ARM: result = get_arm_section_type_name (sh_type); break; @@ -9770,6 +9810,8 @@ is_32bit_abs_reloc (unsigned int reloc_type) return reloc_type == 1; /* R_860_32. */ case EM_960: return reloc_type == 2; /* R_960_32. */ + case EM_AARCH64: + return reloc_type == 258; /* R_AARCH64_ABS32 */ case EM_ALPHA: return reloc_type == 1; /* R_ALPHA_REFLONG. */ case EM_ARC: @@ -9924,6 +9966,8 @@ is_32bit_pcrel_reloc (unsigned int reloc_type) return reloc_type == 2; /* R_386_PC32. */ case EM_68K: return reloc_type == 4; /* R_68K_PC32. 
*/ + case EM_AARCH64: + return reloc_type == 261; /* R_AARCH64_PREL32 */ case EM_ADAPTEVA_EPIPHANY: return reloc_type == 6; case EM_ALPHA: @@ -9978,6 +10022,8 @@ is_64bit_abs_reloc (unsigned int reloc_type) { switch (elf_header.e_machine) { + case EM_AARCH64: + return reloc_type == 257; /* R_AARCH64_ABS64. */ case EM_ALPHA: return reloc_type == 2; /* R_ALPHA_REFQUAD. */ case EM_IA_64: @@ -10014,6 +10060,8 @@ is_64bit_pcrel_reloc (unsigned int reloc_type) { switch (elf_header.e_machine) { + case EM_AARCH64: + return reloc_type == 260; /* R_AARCH64_PREL64. */ case EM_ALPHA: return reloc_type == 11; /* R_ALPHA_SREL64. */ case EM_IA_64: @@ -10143,6 +10191,8 @@ is_none_reloc (unsigned int reloc_type) case EM_XC16X: case EM_C166: /* R_XC16X_NONE. */ return reloc_type == 0; + case EM_AARCH64: + return reloc_type == 0 || reloc_type == 256; case EM_XTENSA_OLD: case EM_XTENSA: return (reloc_type == 0 /* R_XTENSA_NONE. */ diff --git a/binutils/testsuite/ChangeLog b/binutils/testsuite/ChangeLog index 7d4de5ab50f..98555dfc5c5 100644 --- a/binutils/testsuite/ChangeLog +++ b/binutils/testsuite/ChangeLog @@ -1,3 +1,16 @@ +2012-08-13 Ian Bolton + Laurent Desnogues + Jim MacArthur + Marcus Shawcroft + Nigel Stephens + Ramana Radhakrishnan + Richard Earnshaw + Sofiane Naci + Tejas Belagod + Yufeng Zhang + + * objdump.exp: Add AArch64. + 2012-08-02 H.J. Lu PR binutils/14420 diff --git a/binutils/testsuite/binutils-all/objdump.exp b/binutils/testsuite/binutils-all/objdump.exp index 267bcbd8244..14d18606e96 100644 --- a/binutils/testsuite/binutils-all/objdump.exp +++ b/binutils/testsuite/binutils-all/objdump.exp @@ -36,7 +36,7 @@ send_user "Version [binutil_version $OBJDUMP]" set got [binutils_run $OBJDUMP "$OBJDUMPFLAGS -i"] set cpus_expected [list] -lappend cpus_expected alpha arc arm cris +lappend cpus_expected aarch64 alpha arc arm cris lappend cpus_expected d10v d30v fr30 fr500 fr550 h8 hppa i386 i860 i960 ip2022 lappend cpus_expected m16c m32c m32r m68hc11 m68hc12 m68k m88k MCore MicroBlaze lappend cpus_expected mips mn10200 mn10300 ms1 msp ns32k pj powerpc pyramid diff --git a/gas/ChangeLog b/gas/ChangeLog index fb6ac642f36..896b941ba4d 100644 --- a/gas/ChangeLog +++ b/gas/ChangeLog @@ -1,3 +1,27 @@ +2012-08-13 Ian Bolton + Laurent Desnogues + Jim MacArthur + Marcus Shawcroft + Nigel Stephens + Ramana Radhakrishnan + Richard Earnshaw + Sofiane Naci + Tejas Belagod + Yufeng Zhang + + * Makefile.am: Add AArch64. + * Makefile.in: Regenerate. + * config/tc-aarch64.c: New file. + * config/tc-aarch64.h: New file. + * configure.tgt: Add AArch64. + * doc/Makefile.am: Add AArch64. + * doc/Makefile.in: Regenerate. + * doc/all.texi: Add AArch64. + * doc/as.texinfo: Add AArch64. + * doc/c-aarch64.texi: New file. + * po/POTFILES.in: Regenerate. + * NEWS: Mention the new support. + 2012-08-13 Maciej W. Rozycki * config/tc-mips.c (NO_ISA_COP, COP_INSN): Remove macros. diff --git a/gas/Makefile.am b/gas/Makefile.am index 020e7cf9802..256e2322fd8 100644 --- a/gas/Makefile.am +++ b/gas/Makefile.am @@ -107,6 +107,7 @@ HFILES = \ # CPU files in config. TARGET_CPU_CFILES = \ + config/tc-aarch64.c \ config/tc-alpha.c \ config/tc-arc.c \ config/tc-arm.c \ @@ -176,6 +177,7 @@ TARGET_CPU_CFILES = \ config/xtensa-relax.c TARGET_CPU_HFILES = \ + config/tc-aarch64.h \ config/tc-alpha.h \ config/tc-arc.h \ config/tc-arm.h \ diff --git a/gas/Makefile.in b/gas/Makefile.in index f631d028ef8..94812d96db9 100644 --- a/gas/Makefile.in +++ b/gas/Makefile.in @@ -375,6 +375,7 @@ HFILES = \ # CPU files in config. 
TARGET_CPU_CFILES = \ + config/tc-aarch64.c \ config/tc-alpha.c \ config/tc-arc.c \ config/tc-arm.c \ @@ -444,6 +445,7 @@ TARGET_CPU_CFILES = \ config/xtensa-relax.c TARGET_CPU_HFILES = \ + config/tc-aarch64.h \ config/tc-alpha.h \ config/tc-arc.h \ config/tc-arm.h \ @@ -793,6 +795,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stabs.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/subsegs.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/symbols.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-aarch64.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-alpha.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-arc.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-arm.Po@am__quote@ @@ -884,6 +887,20 @@ distclean-compile: @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< +tc-aarch64.o: config/tc-aarch64.c +@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-aarch64.o -MD -MP -MF $(DEPDIR)/tc-aarch64.Tpo -c -o tc-aarch64.o `test -f 'config/tc-aarch64.c' || echo '$(srcdir)/'`config/tc-aarch64.c +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-aarch64.Tpo $(DEPDIR)/tc-aarch64.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='config/tc-aarch64.c' object='tc-aarch64.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-aarch64.o `test -f 'config/tc-aarch64.c' || echo '$(srcdir)/'`config/tc-aarch64.c + +tc-aarch64.obj: config/tc-aarch64.c +@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-aarch64.obj -MD -MP -MF $(DEPDIR)/tc-aarch64.Tpo -c -o tc-aarch64.obj `if test -f 'config/tc-aarch64.c'; then $(CYGPATH_W) 'config/tc-aarch64.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-aarch64.c'; fi` +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-aarch64.Tpo $(DEPDIR)/tc-aarch64.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='config/tc-aarch64.c' object='tc-aarch64.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-aarch64.obj `if test -f 'config/tc-aarch64.c'; then $(CYGPATH_W) 'config/tc-aarch64.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-aarch64.c'; fi` + tc-alpha.o: config/tc-alpha.c @am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-alpha.o -MD -MP -MF $(DEPDIR)/tc-alpha.Tpo -c -o tc-alpha.o `test -f 'config/tc-alpha.c' || echo '$(srcdir)/'`config/tc-alpha.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-alpha.Tpo $(DEPDIR)/tc-alpha.Po diff --git a/gas/NEWS b/gas/NEWS index 4564dbcae94..ef0aec3bb1a 100644 --- a/gas/NEWS +++ b/gas/NEWS @@ -1,5 +1,7 @@ -*- text -*- +* Add support for the 64-bit ARM architecture: AArch64. + Changes in 2.23: * Add support for S12X processor. 
diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c new file mode 100644 index 00000000000..4333e8ef232 --- /dev/null +++ b/gas/config/tc-aarch64.c @@ -0,0 +1,7349 @@ +/* tc-aarch64.c -- Assemble for the AArch64 ISA + + Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GAS. + + GAS is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the license, or + (at your option) any later version. + + GAS is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING3. If not, + see <http://www.gnu.org/licenses/>. */ + +#include "as.h" +#include <limits.h> +#include <stdarg.h> +#include "bfd_stdint.h" +#define NO_RELOC 0 +#include "safe-ctype.h" +#include "subsegs.h" +#include "obstack.h" + +#ifdef OBJ_ELF +#include "elf/aarch64.h" +#include "dw2gencfi.h" +#endif + +#include "dwarf2dbg.h" + +/* Types of processor to assemble for. */ +#ifndef CPU_DEFAULT +#define CPU_DEFAULT AARCH64_ARCH_V8 +#endif + +#define streq(a, b) (strcmp (a, b) == 0) + +static aarch64_feature_set cpu_variant; + +/* Variables that we set while parsing command-line options. Once all + options have been read we re-process these values to set the real + assembly flags. */ +static const aarch64_feature_set *mcpu_cpu_opt = NULL; +static const aarch64_feature_set *march_cpu_opt = NULL; + +/* Constants for known architecture features. */ +static const aarch64_feature_set cpu_default = CPU_DEFAULT; + +static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY; +static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE; + +#ifdef OBJ_ELF +/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */ +static symbolS *GOT_symbol; +#endif + +enum neon_el_type +{ + NT_invtype = -1, + NT_b, + NT_h, + NT_s, + NT_d, + NT_q +}; + +/* Bits for DEFINED field in neon_type_el. */ +#define NTA_HASTYPE 1 +#define NTA_HASINDEX 2 + +struct neon_type_el +{ + enum neon_el_type type; + unsigned char defined; + unsigned width; + int64_t index; +}; + +#define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001 + +struct reloc +{ + bfd_reloc_code_real_type type; + expressionS exp; + int pc_rel; + enum aarch64_opnd opnd; + uint32_t flags; + unsigned need_libopcodes_p : 1; +}; + +struct aarch64_instruction +{ + /* libopcodes structure for instruction intermediate representation. */ + aarch64_inst base; + /* Record assembly errors found during the parsing. */ + struct + { + enum aarch64_operand_error_kind kind; + const char *error; + } parsing_error; + /* The condition that appears in the assembly line. */ + int cond; + /* Relocation information (including the GAS internal fixup). */ + struct reloc reloc; + /* Need to generate an immediate in the literal pool. */ + unsigned gen_lit_pool : 1; +}; + +typedef struct aarch64_instruction aarch64_instruction; + +static aarch64_instruction inst; + +static bfd_boolean parse_operands (char *, const aarch64_opcode *); +static bfd_boolean programmer_friendly_fixup (aarch64_instruction *); + +/* Diagnostics inline function utilities. + + These are lightweight utilities which should only be called by parse_operands + and other parsers.
GAS processes each assembly line by parsing it against + instruction template(s); in the case of multiple templates (for the same + mnemonic name), those templates are tried one by one until one succeeds or + all fail. An assembly line may fail a few templates before being + successfully parsed; an error saved here in most cases is not a user error + but an error indicating the current template is not the right template. + Therefore it is very important that errors can be saved at a low cost during + the parsing; we don't want to slow down the whole parsing by recording + non-user errors in detail. + + Remember that the objective is to help GAS pick up the most appropriate + error message in the case of multiple templates, e.g. FMOV which has 8 + templates. */ + +static inline void +clear_error (void) +{ + inst.parsing_error.kind = AARCH64_OPDE_NIL; + inst.parsing_error.error = NULL; +} + +static inline bfd_boolean +error_p (void) +{ + return inst.parsing_error.kind != AARCH64_OPDE_NIL; +} + +static inline const char * +get_error_message (void) +{ + return inst.parsing_error.error; +} + +static inline void +set_error_message (const char *error) +{ + inst.parsing_error.error = error; +} + +static inline enum aarch64_operand_error_kind +get_error_kind (void) +{ + return inst.parsing_error.kind; +} + +static inline void +set_error_kind (enum aarch64_operand_error_kind kind) +{ + inst.parsing_error.kind = kind; +} + +static inline void +set_error (enum aarch64_operand_error_kind kind, const char *error) +{ + inst.parsing_error.kind = kind; + inst.parsing_error.error = error; +} + +static inline void +set_recoverable_error (const char *error) +{ + set_error (AARCH64_OPDE_RECOVERABLE, error); +} + +/* Use the DESC field of the corresponding aarch64_operand entry to compose + the error message. */ +static inline void +set_default_error (void) +{ + set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL); +} + +static inline void +set_syntax_error (const char *error) +{ + set_error (AARCH64_OPDE_SYNTAX_ERROR, error); +} + +static inline void +set_first_syntax_error (const char *error) +{ + if (! error_p ()) + set_error (AARCH64_OPDE_SYNTAX_ERROR, error); +} + +static inline void +set_fatal_syntax_error (const char *error) +{ + set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error); +} + +/* Number of littlenums required to hold an extended precision number. */ +#define MAX_LITTLENUMS 6 + +/* Return value for certain parsers when the parsing fails; those parsers + return the information of the parsed result, e.g. register number, on + success. */ +#define PARSE_FAIL -1 + +/* This is an invalid condition code that means no conditional field is + present. */ +#define COND_ALWAYS 0x10 + +typedef struct +{ + const char *template; + unsigned long value; +} asm_barrier_opt; + +typedef struct +{ + const char *template; + uint32_t value; +} asm_nzcv; + +struct reloc_entry +{ + char *name; + bfd_reloc_code_real_type reloc; +}; + +/* Structure for a hash table entry for a register. */ +typedef struct +{ + const char *name; + unsigned char number; + unsigned char type; + unsigned char builtin; +} reg_entry; + +/* Macros to define the register types and masks for the purpose + of parsing. */
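[Editor's note: a minimal sketch, not part of the patch, of the control flow the diagnostics comment above describes: each candidate template is tried with a clean error slot, and a recorded error only becomes a user-visible diagnostic once every template for the mnemonic has failed. Names are illustrative.]

    #include <stdbool.h>
    #include <stdio.h>

    struct sk_template
    {
      const char *operands;
      bool (*parse) (const char *line);
    };

    static bool
    sk_try_templates (const char *line, const struct sk_template *t, int n)
    {
      int i;
      for (i = 0; i < n; i++)
        {
          /* Equivalent of clear_error (): start each attempt cleanly.  */
          if (t[i].parse (line))
            return true;   /* Matched; any saved error is irrelevant.  */
        }
      /* All templates failed; only now is the saved error reported,
         as the real code does through as_bad ().  */
      fprintf (stderr, "no template matched: %s\n", line);
      return false;
    }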
+ +#undef AARCH64_REG_TYPES +#define AARCH64_REG_TYPES \ + BASIC_REG_TYPE(R_32) /* w[0-30] */ \ + BASIC_REG_TYPE(R_64) /* x[0-30] */ \ + BASIC_REG_TYPE(SP_32) /* wsp */ \ + BASIC_REG_TYPE(SP_64) /* sp */ \ + BASIC_REG_TYPE(Z_32) /* wzr */ \ + BASIC_REG_TYPE(Z_64) /* xzr */ \ + BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\ + BASIC_REG_TYPE(FP_H) /* h[0-31] */ \ + BASIC_REG_TYPE(FP_S) /* s[0-31] */ \ + BASIC_REG_TYPE(FP_D) /* d[0-31] */ \ + BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \ + BASIC_REG_TYPE(CN) /* c[0-7] */ \ + BASIC_REG_TYPE(VN) /* v[0-31] */ \ + /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \ + MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \ + /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \ + MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \ + | REG_TYPE(SP_32) | REG_TYPE(SP_64) \ + | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \ + /* Typecheck: any [BHSDQ]P FP. */ \ + MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \ + | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \ + /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \ + MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \ + | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \ + | REG_TYPE(FP_B) | REG_TYPE(FP_H) \ + | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \ + /* Any integer register; used for error messages only. */ \ + MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \ + | REG_TYPE(SP_32) | REG_TYPE(SP_64) \ + | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \ + /* Pseudo type to mark the end of the enumerator sequence. */ \ + BASIC_REG_TYPE(MAX) + +#undef BASIC_REG_TYPE +#define BASIC_REG_TYPE(T) REG_TYPE_##T, +#undef MULTI_REG_TYPE +#define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T) + +/* Register type enumerators. */ +typedef enum +{ + /* A list of REG_TYPE_*. */ + AARCH64_REG_TYPES +} aarch64_reg_type; + +#undef BASIC_REG_TYPE +#define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T, +#undef REG_TYPE +#define REG_TYPE(T) (1 << REG_TYPE_##T) +#undef MULTI_REG_TYPE +#define MULTI_REG_TYPE(T,V) V, + +/* Values indexed by aarch64_reg_type to assist the type checking. */ +static const unsigned reg_type_masks[] = +{ + AARCH64_REG_TYPES +}; + +#undef BASIC_REG_TYPE +#undef REG_TYPE +#undef MULTI_REG_TYPE +#undef AARCH64_REG_TYPES
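[Editor's note: a minimal, self-contained sketch, not part of the patch, of the X-macro technique above: one table expands first into enumerators and then into per-type bit masks, so a composite "expected type" check is a single mask test. All names are illustrative.]

    #include <stdbool.h>

    #define SK_BIT(T) (1u << SK_##T)
    #define SK_REG_TYPES \
      SK_BASIC (R_32) \
      SK_BASIC (R_64) \
      SK_BASIC (SP_64) \
      SK_MULTI (R64_SP, SK_BIT (R_64) | SK_BIT (SP_64))

    /* First expansion: the enumerators.  */
    #define SK_BASIC(T) SK_##T,
    #define SK_MULTI(T, V) SK_BASIC (T)
    enum sk_reg_type { SK_REG_TYPES SK_TYPE_MAX };
    #undef SK_BASIC
    #undef SK_MULTI

    /* Second expansion: one acceptance mask per type.  */
    #define SK_BASIC(T) SK_BIT (T),
    #define SK_MULTI(T, V) (V),
    static const unsigned sk_masks[] = { SK_REG_TYPES };
    #undef SK_BASIC
    #undef SK_MULTI

    /* Does a concrete register class satisfy the expected type?  */
    static bool
    sk_type_ok (enum sk_reg_type expected, enum sk_reg_type actual)
    {
      return (sk_masks[expected] & (1u << actual)) != 0;
    }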
+ +/* Diagnostics used when we don't get a register of the expected type. + Note: this has to be synchronized with the aarch64_reg_type definitions + above. */ +static const char * +get_reg_expected_msg (aarch64_reg_type reg_type) +{ + const char *msg; + + switch (reg_type) + { + case REG_TYPE_R_32: + msg = N_("integer 32-bit register expected"); + break; + case REG_TYPE_R_64: + msg = N_("integer 64-bit register expected"); + break; + case REG_TYPE_R_N: + msg = N_("integer register expected"); + break; + case REG_TYPE_R_Z_SP: + msg = N_("integer, zero or SP register expected"); + break; + case REG_TYPE_FP_B: + msg = N_("8-bit SIMD scalar register expected"); + break; + case REG_TYPE_FP_H: + msg = N_("16-bit SIMD scalar or floating-point half precision " + "register expected"); + break; + case REG_TYPE_FP_S: + msg = N_("32-bit SIMD scalar or floating-point single precision " + "register expected"); + break; + case REG_TYPE_FP_D: + msg = N_("64-bit SIMD scalar or floating-point double precision " + "register expected"); + break; + case REG_TYPE_FP_Q: + msg = N_("128-bit SIMD scalar or floating-point quad precision " + "register expected"); + break; + case REG_TYPE_CN: + msg = N_("C0 - C15 expected"); + break; + case REG_TYPE_R_Z_BHSDQ_V: + msg = N_("register expected"); + break; + case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */ + msg = N_("SIMD scalar or floating-point register expected"); + break; + case REG_TYPE_VN: /* any V reg */ + msg = N_("vector register expected"); + break; + default: + as_fatal (_("invalid register type %d"), reg_type); + } + return msg; +} + +/* Some well known registers that we refer to directly elsewhere. */ +#define REG_SP 31 + +/* Instructions take 4 bytes in the object file. */ +#define INSN_SIZE 4 + +/* Define some common error messages. */ +#define BAD_SP _("SP not allowed here") + +static struct hash_control *aarch64_ops_hsh; +static struct hash_control *aarch64_cond_hsh; +static struct hash_control *aarch64_shift_hsh; +static struct hash_control *aarch64_sys_regs_hsh; +static struct hash_control *aarch64_pstatefield_hsh; +static struct hash_control *aarch64_sys_regs_ic_hsh; +static struct hash_control *aarch64_sys_regs_dc_hsh; +static struct hash_control *aarch64_sys_regs_at_hsh; +static struct hash_control *aarch64_sys_regs_tlbi_hsh; +static struct hash_control *aarch64_reg_hsh; +static struct hash_control *aarch64_barrier_opt_hsh; +static struct hash_control *aarch64_nzcv_hsh; +static struct hash_control *aarch64_pldop_hsh; + +/* Stuff needed to resolve the label ambiguity + As: + ... + label: <insn> + may differ from: + ... + label: + <insn> + */ + +static symbolS *last_label_seen; + +/* Literal pool structure. Held on a per-section + and per-sub-section basis. */ + +#define MAX_LITERAL_POOL_SIZE 1024 +typedef struct literal_pool +{ + expressionS literals[MAX_LITERAL_POOL_SIZE]; + unsigned int next_free_entry; + unsigned int id; + symbolS *symbol; + segT section; + subsegT sub_section; + int size; + struct literal_pool *next; +} literal_pool; + +/* Pointer to a linked list of literal pools. */ +static literal_pool *list_of_pools = NULL; + +/* Pure syntax. */ + +/* This array holds the chars that always start a comment. If the + pre-processor is disabled, these aren't very useful. */ +const char comment_chars[] = ""; + +/* This array holds the chars that only start a comment at the beginning of + a line. If the line seems to have the form '# 123 filename' + .line and .file directives will appear in the pre-processed output. */ +/* Note that input_file.c hand checks for '#' at the beginning of the + first line of the input file. This is because the compiler outputs + #NO_APP at the beginning of its output. */
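[Editor's note: a minimal sketch, not part of the patch, of the lookup implied by the literal-pool structure declared above: pools sit on one linked list and are keyed by (section, sub_section), so each segment/subsegment pair owns at most one pool. Names are illustrative.]

    #include <stddef.h>

    struct sk_pool
    {
      int section;
      int sub_section;
      struct sk_pool *next;
    };

    static struct sk_pool *
    sk_find_pool (struct sk_pool *list, int section, int sub_section)
    {
      struct sk_pool *p;
      for (p = list; p != NULL; p = p->next)
        if (p->section == section && p->sub_section == sub_section)
          return p;
      return NULL;   /* Caller allocates and links a fresh pool.  */
    }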
+
+/* Pure syntax.  */
+
+/* This array holds the chars that always start a comment.  If the
+   pre-processor is disabled, these aren't very useful.  */
+const char comment_chars[] = "";
+
+/* This array holds the chars that only start a comment at the beginning of
+   a line.  If the line seems to have the form '# 123 filename'
+   .line and .file directives will appear in the pre-processed output.  */
+/* Note that input_file.c hand checks for '#' at the beginning of the
+   first line of the input file.  This is because the compiler outputs
+   #NO_APP at the beginning of its output.  */
+/* Also note that comments like this one will always work.  */
+const char line_comment_chars[] = "#";
+
+const char line_separator_chars[] = ";";
+
+/* Chars that can be used to separate the mantissa
+   from the exponent in floating point numbers.  */
+const char EXP_CHARS[] = "eE";
+
+/* Chars that mean this number is a floating point constant.  */
+/* As in 0f12.456  */
+/* or    0d1.2345e12  */
+
+const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
+
+/* Prefix character that indicates the start of an immediate value.  */
+#define is_immediate_prefix(C) ((C) == '#')
+
+/* Separator character handling.  */
+
+#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
+
+static inline bfd_boolean
+skip_past_char (char **str, char c)
+{
+  if (**str == c)
+    {
+      (*str)++;
+      return TRUE;
+    }
+  else
+    return FALSE;
+}
+
+#define skip_past_comma(str) skip_past_char (str, ',')
+
+/* Arithmetic expressions (possibly involving symbols).  */
+
+/* Return TRUE if anything in the expression *SP is a bignum.  */
+
+static bfd_boolean
+exp_has_bignum_p (symbolS * sp)
+{
+  if (symbol_get_value_expression (sp)->X_op == O_big)
+    return TRUE;
+
+  if (symbol_get_value_expression (sp)->X_add_symbol)
+    {
+      return (exp_has_bignum_p (symbol_get_value_expression (sp)->X_add_symbol)
+              || (symbol_get_value_expression (sp)->X_op_symbol
+                  && exp_has_bignum_p (symbol_get_value_expression (sp)->
+                                       X_op_symbol)));
+    }
+
+  return FALSE;
+}
+
+static bfd_boolean in_my_get_expression_p = FALSE;
+
+/* Third argument to my_get_expression.  */
+#define GE_NO_PREFIX 0
+#define GE_OPT_PREFIX 1
+
+/* Return TRUE if the string pointed to by *STR is successfully parsed
+   as a valid expression; *EP will be filled with the information of
+   such an expression.  Otherwise return FALSE.  */
+
+static bfd_boolean
+my_get_expression (expressionS * ep, char **str, int prefix_mode,
+                   int reject_absent)
+{
+  char *save_in;
+  segT seg;
+  int prefix_present_p = 0;
+
+  switch (prefix_mode)
+    {
+    case GE_NO_PREFIX:
+      break;
+    case GE_OPT_PREFIX:
+      if (is_immediate_prefix (**str))
+        {
+          (*str)++;
+          prefix_present_p = 1;
+        }
+      break;
+    default:
+      abort ();
+    }
+
+  memset (ep, 0, sizeof (expressionS));
+
+  save_in = input_line_pointer;
+  input_line_pointer = *str;
+  in_my_get_expression_p = TRUE;
+  seg = expression (ep);
+  in_my_get_expression_p = FALSE;
+
+  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
+    {
+      /* We found a bad expression in md_operand().  */
+      *str = input_line_pointer;
+      input_line_pointer = save_in;
+      if (prefix_present_p && ! error_p ())
+        set_fatal_syntax_error (_("bad expression"));
+      else
+        set_first_syntax_error (_("bad expression"));
+      return FALSE;
+    }
+
+#ifdef OBJ_AOUT
+  if (seg != absolute_section
+      && seg != text_section
+      && seg != data_section
+      && seg != bss_section && seg != undefined_section)
+    {
+      set_syntax_error (_("bad segment"));
+      *str = input_line_pointer;
+      input_line_pointer = save_in;
+      return FALSE;
+    }
+#else
+  (void) seg;
+#endif
+
+  /* Get rid of any bignums now, so that we don't generate an error for which
+     we can't establish a line number later on.  Big numbers are never valid
+     in instructions, which is where this routine is always called.  */
+  if (ep->X_op == O_big
+      || (ep->X_add_symbol
+          && (exp_has_bignum_p (ep->X_add_symbol)
+              || (ep->X_op_symbol && exp_has_bignum_p (ep->X_op_symbol)))))
+    {
+      if (prefix_present_p && error_p ())
+        set_fatal_syntax_error (_("invalid constant"));
+      else
+        set_first_syntax_error (_("invalid constant"));
+      *str = input_line_pointer;
+      input_line_pointer = save_in;
+      return FALSE;
+    }
+
+  *str = input_line_pointer;
+  input_line_pointer = save_in;
+  return TRUE;
+}
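+
+/* A usage sketch (hypothetical caller, for illustration): with the
+   input "#42, lsl #12", the call
+
+     expressionS exp;
+     my_get_expression (&exp, &str, GE_OPT_PREFIX, 1);
+
+   consumes the optional '#', leaves exp.X_op == O_constant with
+   exp.X_add_number == 42, and advances STR to the ", lsl #12"
+   tail.  */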
+
+/* Turn a string in input_line_pointer into a floating point constant
+   of type TYPE, and store the appropriate bytes in *LITP.  The number
+   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
+   returned, or NULL on OK.  */
+
+char *
+md_atof (int type, char *litP, int *sizeP)
+{
+  return ieee_md_atof (type, litP, sizeP, target_big_endian);
+}
+
+/* We handle all bad expressions here, so that we can report the faulty
+   instruction in the error message.  */
+void
+md_operand (expressionS * exp)
+{
+  if (in_my_get_expression_p)
+    exp->X_op = O_illegal;
+}
+
+/* Immediate values.  */
+
+/* Errors may be set multiple times during parsing or bit encoding
+   (particularly in the Neon bits), but usually the earliest error which is
+   set will be the most meaningful.  Avoid overwriting it with later
+   (cascading) errors by calling this function.  */
+
+static void
+first_error (const char *error)
+{
+  if (! error_p ())
+    set_syntax_error (error);
+}
+
+/* Similar to first_error, but this function accepts a formatted error
+   message.  */
+static void
+first_error_fmt (const char *format, ...)
+{
+  va_list args;
+  enum
+  { size = 100 };
+  /* N.B. this single buffer will not cause error messages for different
+     instructions to pollute each other; this is because at the end of
+     processing of each assembly line, any error message will be
+     collected by as_bad.  */
+  static char buffer[size];
+
+  if (! error_p ())
+    {
+      int ret;
+      va_start (args, format);
+      ret = vsnprintf (buffer, size, format, args);
+      know (ret <= size - 1 && ret >= 0);
+      va_end (args);
+      set_syntax_error (buffer);
+    }
+}
+
+/* Register parsing.  */
+
+/* Generic register parser which is called by other specialized
+   register parsers.
+   CCP points to what should be the beginning of a register name.
+   If it is indeed a valid register name, advance CCP over it and
+   return the reg_entry structure; otherwise return NULL.
+   It does not issue diagnostics.  */
+
+static reg_entry *
+parse_reg (char **ccp)
+{
+  char *start = *ccp;
+  char *p;
+  reg_entry *reg;
+
+#ifdef REGISTER_PREFIX
+  if (*start != REGISTER_PREFIX)
+    return NULL;
+  start++;
+#endif
+
+  p = start;
+  if (!ISALPHA (*p) || !is_name_beginner (*p))
+    return NULL;
+
+  do
+    p++;
+  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
+
+  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
+
+  if (!reg)
+    return NULL;
+
+  *ccp = p;
+  return reg;
+}
+
+/* Return TRUE if REG->TYPE is a valid instance of the required register
+   type TYPE; otherwise return FALSE.  */
+static bfd_boolean
+aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
+{
+  if (reg->type == type)
+    return TRUE;
+
+  switch (type)
+    {
+    case REG_TYPE_R64_SP:      /* 64-bit integer reg (inc SP exc XZR).  */
+    case REG_TYPE_R_Z_SP:      /* Integer reg (inc {X}SP inc [WX]ZR).  */
+    case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn.  */
+    case REG_TYPE_BHSDQ:       /* Any [BHSDQ]P FP or SIMD scalar register.  */
+    case REG_TYPE_VN:          /* Vector register.
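+      For these multi-register types, acceptance is decided by the
+      mask test below; for illustration, a D register passes a
+      REG_TYPE_BHSDQ check because reg_type_masks[REG_TYPE_FP_D] is a
+      subset of reg_type_masks[REG_TYPE_BHSDQ].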
*/ + gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX); + return ((reg_type_masks[reg->type] & reg_type_masks[type]) + == reg_type_masks[reg->type]); + default: + as_fatal ("unhandled type %d", type); + abort (); + } +} + +/* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP. + Return the register number otherwise. *ISREG32 is set to one if the + register is 32-bit wide; *ISREGZERO is set to one if the register is + of type Z_32 or Z_64. + Note that this function does not issue any diagnostics. */ + +static int +aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz, + int *isreg32, int *isregzero) +{ + char *str = *ccp; + const reg_entry *reg = parse_reg (&str); + + if (reg == NULL) + return PARSE_FAIL; + + if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP)) + return PARSE_FAIL; + + switch (reg->type) + { + case REG_TYPE_SP_32: + case REG_TYPE_SP_64: + if (reject_sp) + return PARSE_FAIL; + *isreg32 = reg->type == REG_TYPE_SP_32; + *isregzero = 0; + break; + case REG_TYPE_R_32: + case REG_TYPE_R_64: + *isreg32 = reg->type == REG_TYPE_R_32; + *isregzero = 0; + break; + case REG_TYPE_Z_32: + case REG_TYPE_Z_64: + if (reject_rz) + return PARSE_FAIL; + *isreg32 = reg->type == REG_TYPE_Z_32; + *isregzero = 1; + break; + default: + return PARSE_FAIL; + } + + *ccp = str; + + return reg->number; +} + +/* Parse the qualifier of a SIMD vector register or a SIMD vector element. + Fill in *PARSED_TYPE and return TRUE if the parsing succeeds; + otherwise return FALSE. + + Accept only one occurrence of: + 8b 16b 4h 8h 2s 4s 1d 2d + b h s d q */ +static bfd_boolean +parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str) +{ + char *ptr = *str; + unsigned width; + unsigned element_size; + enum neon_el_type type; + + /* skip '.' */ + ptr++; + + if (!ISDIGIT (*ptr)) + { + width = 0; + goto elt_size; + } + width = strtoul (ptr, &ptr, 10); + if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16) + { + first_error_fmt (_("bad size %d in vector width specifier"), width); + return FALSE; + } + +elt_size: + switch (TOLOWER (*ptr)) + { + case 'b': + type = NT_b; + element_size = 8; + break; + case 'h': + type = NT_h; + element_size = 16; + break; + case 's': + type = NT_s; + element_size = 32; + break; + case 'd': + type = NT_d; + element_size = 64; + break; + case 'q': + if (width == 1) + { + type = NT_q; + element_size = 128; + break; + } + /* fall through. */ + default: + if (*ptr != '\0') + first_error_fmt (_("unexpected character `%c' in element size"), *ptr); + else + first_error (_("missing element size")); + return FALSE; + } + if (width != 0 && width * element_size != 64 && width * element_size != 128) + { + first_error_fmt (_ + ("invalid element size %d and vector size combination %c"), + width, *ptr); + return FALSE; + } + ptr++; + + parsed_type->type = type; + parsed_type->width = width; + + *str = ptr; + + return TRUE; +} + +/* Parse a single type, e.g. ".8b", leading period included. + Only applicable to Vn registers. + + Return TRUE on success; otherwise return FALSE. */ +static bfd_boolean +parse_neon_operand_type (struct neon_type_el *vectype, char **ccp) +{ + char *str = *ccp; + + if (*str == '.') + { + if (! parse_neon_type_for_operand (vectype, &str)) + { + first_error (_("vector type expected")); + return FALSE; + } + } + else + return FALSE; + + *ccp = str; + + return TRUE; +} + +/* Parse a register of the type TYPE. 
+
+   Return PARSE_FAIL if the string pointed to by *CCP is not a valid
+   register name or the parsed register is not of TYPE.
+
+   Otherwise return the register number, and optionally fill in the actual
+   type of the register in *RTYPE when multiple alternatives were given, and
+   return the register shape and element index information in *TYPEINFO.
+
+   IN_REG_LIST should be set to TRUE if the caller is parsing a register
+   list.  */
+
+static int
+parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
+                 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
+{
+  char *str = *ccp;
+  const reg_entry *reg = parse_reg (&str);
+  struct neon_type_el atype;
+  struct neon_type_el parsetype;
+  bfd_boolean is_typed_vecreg = FALSE;
+
+  atype.defined = 0;
+  atype.type = NT_invtype;
+  atype.width = -1;
+  atype.index = 0;
+
+  if (reg == NULL)
+    {
+      if (typeinfo)
+        *typeinfo = atype;
+      set_default_error ();
+      return PARSE_FAIL;
+    }
+
+  if (! aarch64_check_reg_type (reg, type))
+    {
+      DEBUG_TRACE ("reg type check failed");
+      set_default_error ();
+      return PARSE_FAIL;
+    }
+  type = reg->type;
+
+  if (type == REG_TYPE_VN
+      && parse_neon_operand_type (&parsetype, &str))
+    {
+      /* The register is of the form Vn.[bhsdq].  */
+      is_typed_vecreg = TRUE;
+
+      if (parsetype.width == 0)
+        /* Expect index.  In the new scheme we cannot have
+           Vn.[bhsdq] represent a scalar.  Therefore any
+           Vn.[bhsdq] should have an index following it.
+           Except in register lists, of course.  */
+        atype.defined |= NTA_HASINDEX;
+      else
+        atype.defined |= NTA_HASTYPE;
+
+      atype.type = parsetype.type;
+      atype.width = parsetype.width;
+    }
+
+  if (skip_past_char (&str, '['))
+    {
+      expressionS exp;
+
+      /* Reject Sn[index] syntax.  */
+      if (!is_typed_vecreg)
+        {
+          first_error (_("this type of register can't be indexed"));
+          return PARSE_FAIL;
+        }
+
+      if (in_reg_list == TRUE)
+        {
+          first_error (_("index not allowed inside register list"));
+          return PARSE_FAIL;
+        }
+
+      atype.defined |= NTA_HASINDEX;
+
+      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
+
+      if (exp.X_op != O_constant)
+        {
+          first_error (_("constant expression required"));
+          return PARSE_FAIL;
+        }
+
+      if (! skip_past_char (&str, ']'))
+        return PARSE_FAIL;
+
+      atype.index = exp.X_add_number;
+    }
+  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
+    {
+      /* Indexed vector register expected.  */
+      first_error (_("indexed vector register expected"));
+      return PARSE_FAIL;
+    }
+
+  /* A vector reg Vn should be typed or indexed.  */
+  if (type == REG_TYPE_VN && atype.defined == 0)
+    {
+      first_error (_("invalid use of vector register"));
+    }
+
+  if (typeinfo)
+    *typeinfo = atype;
+
+  if (rtype)
+    *rtype = type;
+
+  *ccp = str;
+
+  return reg->number;
+}
+
+/* Parse register.
+
+   Return the register number on success; return PARSE_FAIL otherwise.
+
+   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
+   the register (e.g. NEON double or quad reg when either has been requested).
+
+   If this is a NEON vector register with additional type information, fill
+   in the struct pointed to by VECTYPE (if non-NULL).
+
+   This parser does not handle register lists.  */
+
+static int
+aarch64_reg_parse (char **ccp, aarch64_reg_type type,
+                   aarch64_reg_type *rtype, struct neon_type_el *vectype)
+{
+  struct neon_type_el atype;
+  char *str = *ccp;
+  int reg = parse_typed_reg (&str, type, rtype, &atype,
+                             /*in_reg_list= */ FALSE);
+
+  if (reg == PARSE_FAIL)
+    return PARSE_FAIL;
+
+  if (vectype)
+    *vectype = atype;
+
+  *ccp = str;
+
+  return reg;
+}
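+
+/* A usage sketch (hypothetical input, for illustration): given the
+   text "v3.4s, v4.4s", the call
+
+     struct neon_type_el t;
+     int n = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &t);
+
+   returns n == 3 with t.type == NT_s and t.width == 4, leaving STR
+   just past "v3.4s".  */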
+
+static inline bfd_boolean
+eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
+{
+  return
+    e1.type == e2.type
+    && e1.defined == e2.defined
+    && e1.width == e2.width && e1.index == e2.index;
+}
+
+/* This function parses the NEON register list.  On success, it returns
+   the parsed register list information in the following encoded format:
+
+   bit   18-22   |   13-17   |   7-11    |    2-6    |  0-1
+       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
+
+   The information of the register shape and/or index is returned in
+   *VECTYPE.
+
+   It returns PARSE_FAIL if the register list is invalid.
+
+   The list contains one to four registers.
+   Each register can be one of:
+     <Vt>.<T>[<index>]
+     <Vt>.<T>
+   All <T> should be identical.
+   All <index> should be identical.
+   There are restrictions on the register numbers which are checked later
+   (by reg_list_valid_p).  */
+
+static int
+parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
+{
+  char *str = *ccp;
+  int nb_regs;
+  struct neon_type_el typeinfo, typeinfo_first;
+  int val, val_range;
+  int in_range;
+  int ret_val;
+  int i;
+  bfd_boolean error = FALSE;
+  bfd_boolean expect_index = FALSE;
+
+  if (*str != '{')
+    {
+      set_syntax_error (_("expecting {"));
+      return PARSE_FAIL;
+    }
+  str++;
+
+  nb_regs = 0;
+  typeinfo_first.defined = 0;
+  typeinfo_first.type = NT_invtype;
+  typeinfo_first.width = -1;
+  typeinfo_first.index = 0;
+  ret_val = 0;
+  val = -1;
+  val_range = -1;
+  in_range = 0;
+  do
+    {
+      if (in_range)
+        {
+          str++;		/* skip over '-' */
+          val_range = val;
+        }
+      val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
+                             /*in_reg_list= */ TRUE);
+      if (val == PARSE_FAIL)
+        {
+          set_first_syntax_error (_("invalid vector register in list"));
+          error = TRUE;
+          continue;
+        }
+      /* reject [bhsd]n */
+      if (typeinfo.defined == 0)
+        {
+          set_first_syntax_error (_("invalid scalar register in list"));
+          error = TRUE;
+          continue;
+        }
+
+      if (typeinfo.defined & NTA_HASINDEX)
+        expect_index = TRUE;
+
+      if (in_range)
+        {
+          if (val < val_range)
+            {
+              set_first_syntax_error
+                (_("invalid range in vector register list"));
+              error = TRUE;
+            }
+          val_range++;
+        }
+      else
+        {
+          val_range = val;
+          if (nb_regs == 0)
+            typeinfo_first = typeinfo;
+          else if (! eq_neon_type_el (typeinfo_first, typeinfo))
+            {
+              set_first_syntax_error
+                (_("type mismatch in vector register list"));
+              error = TRUE;
+            }
+        }
+      if (! error)
+        for (i = val_range; i <= val; i++)
+          {
+            ret_val |= i << (5 * nb_regs);
+            nb_regs++;
+          }
+      in_range = 0;
+    }
+  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
+
+  skip_whitespace (str);
+  if (*str != '}')
+    {
+      set_first_syntax_error (_("end of vector register list not found"));
+      error = TRUE;
+    }
+  str++;
+
+  skip_whitespace (str);
+
+  if (expect_index)
+    {
+      if (skip_past_char (&str, '['))
+        {
+          expressionS exp;
+
+          my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
+          if (exp.X_op != O_constant)
+            {
+              set_first_syntax_error (_("constant expression required."));
+              error = TRUE;
+            }
+          if (! 
skip_past_char (&str, ']')) + error = TRUE; + else + typeinfo_first.index = exp.X_add_number; + } + else + { + set_first_syntax_error (_("expected index")); + error = TRUE; + } + } + + if (nb_regs > 4) + { + set_first_syntax_error (_("too many registers in vector register list")); + error = TRUE; + } + else if (nb_regs == 0) + { + set_first_syntax_error (_("empty vector register list")); + error = TRUE; + } + + *ccp = str; + if (! error) + *vectype = typeinfo_first; + + return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1); +} + +/* Directives: register aliases. */ + +static reg_entry * +insert_reg_alias (char *str, int number, aarch64_reg_type type) +{ + reg_entry *new; + const char *name; + + if ((new = hash_find (aarch64_reg_hsh, str)) != 0) + { + if (new->builtin) + as_warn (_("ignoring attempt to redefine built-in register '%s'"), + str); + + /* Only warn about a redefinition if it's not defined as the + same register. */ + else if (new->number != number || new->type != type) + as_warn (_("ignoring redefinition of register alias '%s'"), str); + + return NULL; + } + + name = xstrdup (str); + new = xmalloc (sizeof (reg_entry)); + + new->name = name; + new->number = number; + new->type = type; + new->builtin = FALSE; + + if (hash_insert (aarch64_reg_hsh, name, (void *) new)) + abort (); + + return new; +} + +/* Look for the .req directive. This is of the form: + + new_register_name .req existing_register_name + + If we find one, or if it looks sufficiently like one that we want to + handle any error here, return TRUE. Otherwise return FALSE. */ + +static bfd_boolean +create_register_alias (char *newname, char *p) +{ + const reg_entry *old; + char *oldname, *nbuf; + size_t nlen; + + /* The input scrubber ensures that whitespace after the mnemonic is + collapsed to single spaces. */ + oldname = p; + if (strncmp (oldname, " .req ", 6) != 0) + return FALSE; + + oldname += 6; + if (*oldname == '\0') + return FALSE; + + old = hash_find (aarch64_reg_hsh, oldname); + if (!old) + { + as_warn (_("unknown register '%s' -- .req ignored"), oldname); + return TRUE; + } + + /* If TC_CASE_SENSITIVE is defined, then newname already points to + the desired alias name, and p points to its end. If not, then + the desired alias name is in the global original_case_string. */ +#ifdef TC_CASE_SENSITIVE + nlen = p - newname; +#else + newname = original_case_string; + nlen = strlen (newname); +#endif + + nbuf = alloca (nlen + 1); + memcpy (nbuf, newname, nlen); + nbuf[nlen] = '\0'; + + /* Create aliases under the new name as stated; an all-lowercase + version of the new name; and an all-uppercase version of the new + name. */ + if (insert_reg_alias (nbuf, old->number, old->type) != NULL) + { + for (p = nbuf; *p; p++) + *p = TOUPPER (*p); + + if (strncmp (nbuf, newname, nlen)) + { + /* If this attempt to create an additional alias fails, do not bother + trying to create the all-lower case alias. We will fail and issue + a second, duplicate error message. This situation arises when the + programmer does something like: + foo .req r0 + Foo .req r1 + The second .req creates the "Foo" alias but then fails to create + the artificial FOO alias because it has already been created by the + first .req. 
+	     */
+          if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
+            return TRUE;
+        }
+
+      for (p = nbuf; *p; p++)
+        *p = TOLOWER (*p);
+
+      if (strncmp (nbuf, newname, nlen))
+        insert_reg_alias (nbuf, old->number, old->type);
+    }
+
+  return TRUE;
+}
+
+/* Should never be called, as .req goes between the alias and the
+   register name, not at the beginning of the line.  */
+static void
+s_req (int a ATTRIBUTE_UNUSED)
+{
+  as_bad (_("invalid syntax for .req directive"));
+}
+
+/* The .unreq directive deletes an alias which was previously defined
+   by .req.  For example:
+
+     my_alias .req r11
+     .unreq my_alias  */
+
+static void
+s_unreq (int a ATTRIBUTE_UNUSED)
+{
+  char *name;
+  char saved_char;
+
+  name = input_line_pointer;
+
+  while (*input_line_pointer != 0
+         && *input_line_pointer != ' ' && *input_line_pointer != '\n')
+    ++input_line_pointer;
+
+  saved_char = *input_line_pointer;
+  *input_line_pointer = 0;
+
+  if (!*name)
+    as_bad (_("invalid syntax for .unreq directive"));
+  else
+    {
+      reg_entry *reg = hash_find (aarch64_reg_hsh, name);
+
+      if (!reg)
+        as_bad (_("unknown register alias '%s'"), name);
+      else if (reg->builtin)
+        as_warn (_("ignoring attempt to undefine built-in register '%s'"),
+                 name);
+      else
+        {
+          char *p;
+          char *nbuf;
+
+          hash_delete (aarch64_reg_hsh, name, FALSE);
+          free ((char *) reg->name);
+          free (reg);
+
+          /* Also locate the all upper case and all lower case versions.
+             Do not complain if we cannot find one or the other as it
+             was probably deleted above.  */
+
+          nbuf = strdup (name);
+          for (p = nbuf; *p; p++)
+            *p = TOUPPER (*p);
+          reg = hash_find (aarch64_reg_hsh, nbuf);
+          if (reg)
+            {
+              hash_delete (aarch64_reg_hsh, nbuf, FALSE);
+              free ((char *) reg->name);
+              free (reg);
+            }
+
+          for (p = nbuf; *p; p++)
+            *p = TOLOWER (*p);
+          reg = hash_find (aarch64_reg_hsh, nbuf);
+          if (reg)
+            {
+              hash_delete (aarch64_reg_hsh, nbuf, FALSE);
+              free ((char *) reg->name);
+              free (reg);
+            }
+
+          free (nbuf);
+        }
+    }
+
+  *input_line_pointer = saved_char;
+  demand_empty_rest_of_line ();
+}
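+
+/* Example directive usage (hypothetical assembler input, for
+   illustration):
+
+       acc  .req  x9        // "acc" becomes an alias for x9
+       mov  acc, xzr
+       .unreq acc           // and is deleted again here
+
+   Aliases are also entered in all-upper-case and all-lower-case
+   forms; see create_register_alias above.  */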
+
+/* Directives: Instruction set selection.  */
+
+#ifdef OBJ_ELF
+/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
+   spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
+   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
+   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are
+   untyped.  */
+
+/* Create a new mapping symbol for the transition to STATE.  */
+
+static void
+make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
+{
+  symbolS *symbolP;
+  const char *symname;
+  int type;
+
+  switch (state)
+    {
+    case MAP_DATA:
+      symname = "$d";
+      type = BSF_NO_FLAGS;
+      break;
+    case MAP_INSN:
+      symname = "$x";
+      type = BSF_NO_FLAGS;
+      break;
+    default:
+      abort ();
+    }
+
+  symbolP = symbol_new (symname, now_seg, value, frag);
+  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
+
+  /* Save the mapping symbols for future reference.  Also check that
+     we do not place two mapping symbols at the same offset within a
+     frag.  We'll handle overlap between frags in
+     check_mapping_symbols.
+
+     If .fill or other data filling directive generates zero sized data,
+     the mapping symbol for the following code will have the same value
+     as the one generated for the data filling directive.  In this case,
+     we replace the old symbol with the new one at the same address.  */
+  if (value == 0)
+    {
+      if (frag->tc_frag_data.first_map != NULL)
+        {
+          know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
+          symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
+                         &symbol_lastP);
+        }
+      frag->tc_frag_data.first_map = symbolP;
+    }
+  if (frag->tc_frag_data.last_map != NULL)
+    {
+      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
+            S_GET_VALUE (symbolP));
+      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
+        symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
+                       &symbol_lastP);
+    }
+  frag->tc_frag_data.last_map = symbolP;
+}
+
+/* We must sometimes convert a region marked as code to data during
+   code alignment, if an odd number of bytes have to be padded.  The
+   code mapping symbol is pushed to an aligned address.  */
+
+static void
+insert_data_mapping_symbol (enum mstate state,
+                            valueT value, fragS * frag, offsetT bytes)
+{
+  /* If there was already a mapping symbol, remove it.  */
+  if (frag->tc_frag_data.last_map != NULL
+      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
+      frag->fr_address + value)
+    {
+      symbolS *symp = frag->tc_frag_data.last_map;
+
+      if (value == 0)
+        {
+          know (frag->tc_frag_data.first_map == symp);
+          frag->tc_frag_data.first_map = NULL;
+        }
+      frag->tc_frag_data.last_map = NULL;
+      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
+    }
+
+  make_mapping_symbol (MAP_DATA, value, frag);
+  make_mapping_symbol (state, value + bytes, frag);
+}
+
+static void mapping_state_2 (enum mstate state, int max_chars);
+
+/* Set the mapping state to STATE.  Only call this when about to
+   emit some STATE bytes to the file.  */
+
+void
+mapping_state (enum mstate state)
+{
+  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
+
+#define TRANSITION(from, to) (mapstate == (from) && state == (to))
+
+  if (mapstate == state)
+    /* The mapping symbol has already been emitted.
+       There is nothing else to do.  */
+    return;
+  else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
+    /* This case will be evaluated later in the next else.  */
+    return;
+  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
+    {
+      /* Only add the symbol if the offset is > 0:
+         if we're at the first frag, check its size > 0;
+         if we're not at the first frag, then for sure
+         the offset is > 0.  */
+      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
+      const int add_symbol = (frag_now != frag_first)
+        || (frag_now_fix () > 0);
+
+      if (add_symbol)
+        make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
+    }
+
+  mapping_state_2 (state, 0);
+#undef TRANSITION
+}
+
+/* Same as mapping_state, but MAX_CHARS bytes have already been
+   allocated.  Put the mapping symbol that far back.  */
+
+static void
+mapping_state_2 (enum mstate state, int max_chars)
+{
+  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
+
+  if (!SEG_NORMAL (now_seg))
+    return;
+
+  if (mapstate == state)
+    /* The mapping symbol has already been emitted.
+       There is nothing else to do.  */
+    return;
+
+  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
+  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
+}
+#else
+#define mapping_state(x)	/* nothing */
+#define mapping_state_2(x, y)	/* nothing */
+#endif
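+
+/* For illustration: assembling
+
+       .text
+       .word 0x12345678     // "$d" mapping symbol at offset 0
+       add  x0, x1, x2      // "$x" mapping symbol at offset 4
+
+   leaves two untyped local symbols telling consumers (disassembler,
+   linker) where data ends and code begins.  */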
+
+/* Directives: sectioning and alignment.  */
+
+static void
+s_bss (int ignore ATTRIBUTE_UNUSED)
+{
+  /* We don't support putting frags in the BSS segment; we fake it by
+     marking in_bss, then looking at s_skip for clues.  */
+  subseg_set (bss_section, 0);
+  demand_empty_rest_of_line ();
+  mapping_state (MAP_DATA);
+}
+
+static void
+s_even (int ignore ATTRIBUTE_UNUSED)
+{
+  /* Never make frag if expect extra pass.  */
+  if (!need_pass_2)
+    frag_align (1, 0, 0);
+
+  record_alignment (now_seg, 1);
+
+  demand_empty_rest_of_line ();
+}
+
+/* Directives: Literal pools.  */
+
+static literal_pool *
+find_literal_pool (int size)
+{
+  literal_pool *pool;
+
+  for (pool = list_of_pools; pool != NULL; pool = pool->next)
+    {
+      if (pool->section == now_seg
+          && pool->sub_section == now_subseg && pool->size == size)
+        break;
+    }
+
+  return pool;
+}
+
+static literal_pool *
+find_or_make_literal_pool (int size)
+{
+  /* Next literal pool ID number.  */
+  static unsigned int latest_pool_num = 1;
+  literal_pool *pool;
+
+  pool = find_literal_pool (size);
+
+  if (pool == NULL)
+    {
+      /* Create a new pool.  */
+      pool = xmalloc (sizeof (*pool));
+      if (!pool)
+        return NULL;
+
+      /* Currently we always put the literal pool in the current text
+         section.  If we were generating "small" model code where we
+         knew that all code and initialised data was within 1MB then
+         we could output literals to mergeable, read-only data
+         sections.  */
+
+      pool->next_free_entry = 0;
+      pool->section = now_seg;
+      pool->sub_section = now_subseg;
+      pool->size = size;
+      pool->next = list_of_pools;
+      pool->symbol = NULL;
+
+      /* Add it to the list.  */
+      list_of_pools = pool;
+    }
+
+  /* New pools, and emptied pools, will have a NULL symbol.  */
+  if (pool->symbol == NULL)
+    {
+      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
+                                    (valueT) 0, &zero_address_frag);
+      pool->id = latest_pool_num++;
+    }
+
+  /* Done.  */
+  return pool;
+}
+
+/* Add the literal of size SIZE in *EXP to the relevant literal pool.
+   Return TRUE on success, otherwise return FALSE.  */
+static bfd_boolean
+add_to_lit_pool (expressionS *exp, int size)
+{
+  literal_pool *pool;
+  unsigned int entry;
+
+  pool = find_or_make_literal_pool (size);
+
+  /* Check if this literal value is already in the pool.  */
+  for (entry = 0; entry < pool->next_free_entry; entry++)
+    {
+      if ((pool->literals[entry].X_op == exp->X_op)
+          && (exp->X_op == O_constant)
+          && (pool->literals[entry].X_add_number == exp->X_add_number)
+          && (pool->literals[entry].X_unsigned == exp->X_unsigned))
+        break;
+
+      if ((pool->literals[entry].X_op == exp->X_op)
+          && (exp->X_op == O_symbol)
+          && (pool->literals[entry].X_add_number == exp->X_add_number)
+          && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
+          && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
+        break;
+    }
+
+  /* Do we need to create a new entry?  */
+  if (entry == pool->next_free_entry)
+    {
+      if (entry >= MAX_LITERAL_POOL_SIZE)
+        {
+          set_syntax_error (_("literal pool overflow"));
+          return FALSE;
+        }
+
+      pool->literals[entry] = *exp;
+      pool->next_free_entry += 1;
+    }
+
+  exp->X_op = O_symbol;
+  exp->X_add_number = ((int) entry) * size;
+  exp->X_add_symbol = pool->symbol;
+
+  return TRUE;
+}
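+
+/* For example (illustrative): the third distinct 8-byte literal added
+   through add_to_lit_pool gets entry == 2, so the caller's expression
+   is rewritten as pool->symbol + 16; the address becomes concrete
+   when s_ltorg below places the pool.  */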
+
+/* Can't use symbol_new here, so have to create a symbol and then at
+   a later date assign it a value.  That's what these functions do.  */
+
+static void
+symbol_locate (symbolS * symbolP,
+               const char *name,/* It is copied, the caller can modify.  */
+               segT segment,    /* Segment identifier (SEG_).  */
+               valueT valu,     /* Symbol value.  */
+               fragS * frag)    /* Associated fragment.  */
+{
+  unsigned int name_length;
+  char *preserved_copy_of_name;
+
+  name_length = strlen (name) + 1;	/* +1 for \0.  */
+  obstack_grow (&notes, name, name_length);
+  preserved_copy_of_name = obstack_finish (&notes);
+
+#ifdef tc_canonicalize_symbol_name
+  preserved_copy_of_name =
+    tc_canonicalize_symbol_name (preserved_copy_of_name);
+#endif
+
+  S_SET_NAME (symbolP, preserved_copy_of_name);
+
+  S_SET_SEGMENT (symbolP, segment);
+  S_SET_VALUE (symbolP, valu);
+  symbol_clear_list_pointers (symbolP);
+
+  symbol_set_frag (symbolP, frag);
+
+  /* Link to end of symbol chain.  */
+  {
+    extern int symbol_table_frozen;
+
+    if (symbol_table_frozen)
+      abort ();
+  }
+
+  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
+
+  obj_symbol_new_hook (symbolP);
+
+#ifdef tc_symbol_new_hook
+  tc_symbol_new_hook (symbolP);
+#endif
+
+#ifdef DEBUG_SYMS
+  verify_symbol_chain (symbol_rootP, symbol_lastP);
+#endif /* DEBUG_SYMS */
+}
+
+
+static void
+s_ltorg (int ignored ATTRIBUTE_UNUSED)
+{
+  unsigned int entry;
+  literal_pool *pool;
+  char sym_name[20];
+  int align;
+
+  for (align = 2; align < 4; align++)
+    {
+      int size = 1 << align;
+
+      pool = find_literal_pool (size);
+      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
+        continue;
+
+      mapping_state (MAP_DATA);
+
+      /* Align pool as you have word accesses.
+         Only make a frag if we have to.  */
+      if (!need_pass_2)
+        frag_align (align, 0, 0);
+
+      record_alignment (now_seg, align);
+
+      sprintf (sym_name, "$$lit_\002%x", pool->id);
+
+      symbol_locate (pool->symbol, sym_name, now_seg,
+                     (valueT) frag_now_fix (), frag_now);
+      symbol_table_insert (pool->symbol);
+
+      for (entry = 0; entry < pool->next_free_entry; entry++)
+        /* First output the expression in the instruction to the pool.  */
+        emit_expr (&(pool->literals[entry]), size);	/* .word|.xword  */
+
+      /* Mark the pool as empty.  */
+      pool->next_free_entry = 0;
+      pool->symbol = NULL;
+    }
+}
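+
+/* s_ltorg above walks align = 2, 3, i.e. the 4-byte pool and then the
+   8-byte pool, aligning each before dumping it.  For example
+   (illustrative): a pool holding two 8-byte literals is aligned to 8
+   bytes and contributes 16 bytes of data, marked as such via
+   mapping_state (MAP_DATA).  */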
+
+#ifdef OBJ_ELF
+/* Forward declarations for functions below, in the MD interface
+   section.  */
+static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
+static struct reloc_table_entry * find_reloc_table_entry (char **);
+
+/* Directives: Data.  */
+/* N.B. the support for relocation suffix in this directive needs to be
+   implemented properly.  */
+
+static void
+s_aarch64_elf_cons (int nbytes)
+{
+  expressionS exp;
+
+#ifdef md_flush_pending_output
+  md_flush_pending_output ();
+#endif
+
+  if (is_it_end_of_statement ())
+    {
+      demand_empty_rest_of_line ();
+      return;
+    }
+
+#ifdef md_cons_align
+  md_cons_align (nbytes);
+#endif
+
+  mapping_state (MAP_DATA);
+  do
+    {
+      struct reloc_table_entry *reloc;
+
+      expression (&exp);
+
+      if (exp.X_op != O_symbol)
+        emit_expr (&exp, (unsigned int) nbytes);
+      else
+        {
+          skip_past_char (&input_line_pointer, '#');
+          if (skip_past_char (&input_line_pointer, ':'))
+            {
+              reloc = find_reloc_table_entry (&input_line_pointer);
+              if (reloc == NULL)
+                as_bad (_("unrecognized relocation suffix"));
+              else
+                as_bad (_("unimplemented relocation suffix"));
+              ignore_rest_of_line ();
+              return;
+            }
+          else
+            emit_expr (&exp, (unsigned int) nbytes);
+        }
+    }
+  while (*input_line_pointer++ == ',');
+
+  /* Put terminator back into stream.  */
+  input_line_pointer--;
+  demand_empty_rest_of_line ();
+}
+
+#endif /* OBJ_ELF */
+
+/* Output a 32-bit word, but mark as an instruction.  */
+
+static void
+s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
+{
+  expressionS exp;
+
+#ifdef md_flush_pending_output
+  md_flush_pending_output ();
+#endif
+
+  if (is_it_end_of_statement ())
+    {
+      demand_empty_rest_of_line ();
+      return;
+    }
+
+  if (!need_pass_2)
+    frag_align_code (2, 0);
+#ifdef OBJ_ELF
+  mapping_state (MAP_INSN);
+#endif
+
+  do
+    {
+      expression (&exp);
+      if (exp.X_op != O_constant)
+        {
+          as_bad (_("constant expression required"));
+          ignore_rest_of_line ();
+          return;
+        }
+
+      if (target_big_endian)
+        {
+          unsigned int val = exp.X_add_number;
+          exp.X_add_number = SWAP_32 (val);
+        }
+      emit_expr (&exp, 4);
+    }
+  while (*input_line_pointer++ == ',');
+
+  /* Put terminator back into stream.  */
+  input_line_pointer--;
+  demand_empty_rest_of_line ();
+}
+
+#ifdef OBJ_ELF
+/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */
+
+static void
+s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
+{
+  expressionS exp;
+
+  /* Since we're just labelling the code, there's no need to define a
+     mapping symbol.  */
+  expression (&exp);
+  /* Make sure there is enough room in this frag for the following
+     blr.  This trick only works if the blr follows immediately after
+     the .tlsdesccall directive.  */
+  frag_grow (4);
+  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
+                   BFD_RELOC_AARCH64_TLSDESC_CALL);
+
+  demand_empty_rest_of_line ();
+}
+#endif /* OBJ_ELF */
+
+static void s_aarch64_arch (int);
+static void s_aarch64_cpu (int);
+
+/* This table describes all the machine specific pseudo-ops the assembler
+   has to support.  The fields are:
+     pseudo-op name without dot
+     function to call to execute this pseudo-op
+     integer arg to pass to the function.  */
+
+const pseudo_typeS md_pseudo_table[] = {
+  /* Never called because '.req' does not start a line.  */
+  {"req", s_req, 0},
+  {"unreq", s_unreq, 0},
+  {"bss", s_bss, 0},
+  {"even", s_even, 0},
+  {"ltorg", s_ltorg, 0},
+  {"pool", s_ltorg, 0},
+  {"cpu", s_aarch64_cpu, 0},
+  {"arch", s_aarch64_arch, 0},
+  {"inst", s_aarch64_inst, 0},
+#ifdef OBJ_ELF
+  {"tlsdesccall", s_tlsdesccall, 0},
+  {"word", s_aarch64_elf_cons, 4},
+  {"long", s_aarch64_elf_cons, 4},
+  {"xword", s_aarch64_elf_cons, 8},
+  {"dword", s_aarch64_elf_cons, 8},
+#endif
+  {0, 0, 0}
+};
+
+
+/* Check whether STR points to a register name followed by a comma or the
+   end of line; REG_TYPE indicates which register types are checked
+   against.  Return TRUE if STR is such a register name; otherwise return
+   FALSE.  The function does not intend to produce any diagnostics, but since
+   the register parser aarch64_reg_parse, which is called by this function,
+   does produce diagnostics, we call clear_error to clear any diagnostics
+   that may be generated by aarch64_reg_parse.
+   Also, the function returns FALSE directly if there is any user error
+   present at the function entry.  This prevents the existing diagnostics
+   state from being spoiled.
+   The function currently serves parse_constant_immediate and
+   parse_big_immediate only.  */
+static bfd_boolean
+reg_name_p (char *str, aarch64_reg_type reg_type)
+{
+  int reg;
+
+  /* Prevent the diagnostics state from being spoiled.  */
+  if (error_p ())
+    return FALSE;
+
+  reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
+
+  /* Clear the parsing error that may be set by the reg parser.
*/ + clear_error (); + + if (reg == PARSE_FAIL) + return FALSE; + + skip_whitespace (str); + if (*str == ',' || is_end_of_line[(unsigned int) *str]) + return TRUE; + + return FALSE; +} + +/* Parser functions used exclusively in instruction operands. */ + +/* Parse an immediate expression which may not be constant. + + To prevent the expression parser from pushing a register name + into the symbol table as an undefined symbol, firstly a check is + done to find out whether STR is a valid register name followed + by a comma or the end of line. Return FALSE if STR is such a + string. */ + +static bfd_boolean +parse_immediate_expression (char **str, expressionS *exp) +{ + if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V)) + { + set_recoverable_error (_("immediate operand required")); + return FALSE; + } + + my_get_expression (exp, str, GE_OPT_PREFIX, 1); + + if (exp->X_op == O_absent) + { + set_fatal_syntax_error (_("missing immediate expression")); + return FALSE; + } + + return TRUE; +} + +/* Constant immediate-value read function for use in insn parsing. + STR points to the beginning of the immediate (with the optional + leading #); *VAL receives the value. + + Return TRUE on success; otherwise return FALSE. */ + +static bfd_boolean +parse_constant_immediate (char **str, int64_t * val) +{ + expressionS exp; + + if (! parse_immediate_expression (str, &exp)) + return FALSE; + + if (exp.X_op != O_constant) + { + set_syntax_error (_("constant expression required")); + return FALSE; + } + + *val = exp.X_add_number; + return TRUE; +} + +static uint32_t +encode_imm_float_bits (uint32_t imm) +{ + return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */ + | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */ +} + +/* Return TRUE if IMM is a valid floating-point immediate; return FALSE + otherwise. */ +static bfd_boolean +aarch64_imm_float_p (uint32_t imm) +{ + /* 3 32222222 2221111111111 + 1 09876543 21098765432109876543210 + n Eeeeeexx xxxx0000000000000000000 */ + uint32_t e; + + e = (imm >> 30) & 0x1; + if (e == 0) + e = 0x3e000000; + else + e = 0x40000000; + return (imm & 0x7ffff) == 0 /* lower 19 bits are 0 */ + && ((imm & 0x7e000000) == e); /* bits 25-29 = ~ bit 30 */ +} + +/* Note: this accepts the floating-point 0 constant. */ +static bfd_boolean +parse_aarch64_imm_float (char **ccp, int *immed) +{ + char *str = *ccp; + char *fpnum; + LITTLENUM_TYPE words[MAX_LITTLENUMS]; + int found_fpchar = 0; + + skip_past_char (&str, '#'); + + /* We must not accidentally parse an integer as a floating-point number. Make + sure that the value we parse is not an integer by checking for special + characters '.' or 'e'. + FIXME: This is a hack that is not very efficient, but doing better is + tricky because type information isn't in a very usable state at parse + time. */ + fpnum = str; + skip_whitespace (fpnum); + + if (strncmp (fpnum, "0x", 2) == 0) + return FALSE; + else + { + for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++) + if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E') + { + found_fpchar = 1; + break; + } + + if (!found_fpchar) + return FALSE; + } + + if ((str = atof_ieee (str, 's', words)) != NULL) + { + unsigned fpword = 0; + int i; + + /* Our FP word must be 32 bits (single-precision FP). 
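+
+         (For illustration: 1.0f is the word 0x3f800000, which
+         aarch64_imm_float_p above accepts and which
+         encode_imm_float_bits packs into the 8-bit immediate 0x70.)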
+       */
+      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
+        {
+          fpword <<= LITTLENUM_NUMBER_OF_BITS;
+          fpword |= words[i];
+        }
+
+      if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
+        *immed = fpword;
+      else
+        goto invalid_fp;
+
+      *ccp = str;
+
+      return TRUE;
+    }
+
+invalid_fp:
+  set_fatal_syntax_error (_("invalid floating-point constant"));
+  return FALSE;
+}
+
+/* Less-generic immediate-value read function with the possibility of loading
+   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
+   instructions.
+
+   To prevent the expression parser from pushing a register name into the
+   symbol table as an undefined symbol, a check is first done to find
+   out whether STR is a valid register name followed by a comma or the end
+   of line.  Return FALSE if STR is such a register.  */
+
+static bfd_boolean
+parse_big_immediate (char **str, int64_t *imm)
+{
+  char *ptr = *str;
+
+  if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
+    {
+      set_syntax_error (_("immediate operand required"));
+      return FALSE;
+    }
+
+  my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
+
+  if (inst.reloc.exp.X_op == O_constant)
+    *imm = inst.reloc.exp.X_add_number;
+
+  *str = ptr;
+
+  return TRUE;
+}
+
+/* Record in RELOC that OPERAND needs a GAS internal fixup; if
+   NEED_LIBOPCODES_P is non-zero, the fixup will need assistance
+   from libopcodes.  */
+
+static inline void
+aarch64_set_gas_internal_fixup (struct reloc *reloc,
+                                const aarch64_opnd_info *operand,
+                                int need_libopcodes_p)
+{
+  reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
+  reloc->opnd = operand->type;
+  if (need_libopcodes_p)
+    reloc->need_libopcodes_p = 1;
+}
+
+/* Return TRUE if the instruction needs to be fixed up later internally by
+   the GAS; otherwise return FALSE.  */
+
+static inline bfd_boolean
+aarch64_gas_internal_fixup_p (void)
+{
+  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
+}
+
+/* Assign the immediate value to the relevant field in *OPERAND if
+   RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
+   needs an internal fixup in a later stage.
+   ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
+   IMM.VALUE that may get assigned with the constant.  */
+static inline void
+assign_imm_if_const_or_fixup_later (struct reloc *reloc,
+                                    aarch64_opnd_info *operand,
+                                    int addr_off_p,
+                                    int need_libopcodes_p,
+                                    int skip_p)
+{
+  if (reloc->exp.X_op == O_constant)
+    {
+      if (addr_off_p)
+        operand->addr.offset.imm = reloc->exp.X_add_number;
+      else
+        operand->imm.value = reloc->exp.X_add_number;
+      reloc->type = BFD_RELOC_UNUSED;
+    }
+  else
+    {
+      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
+      /* Tell libopcodes to ignore this operand or not.  This is helpful
+         when one of the operands needs to be fixed up later but we need
+         libopcodes to check the other operands.  */
+      operand->skip = skip_p;
+    }
+}
+
+/* Relocation modifiers.  Each entry in the table contains the textual
+   name for the relocation which may be placed before a symbol used as
+   a load/store offset, or add immediate.
It must be surrounded by a + leading and trailing colon, for example: + + ldr x0, [x1, #:rello:varsym] + add x0, x1, #:rello:varsym */ + +struct reloc_table_entry +{ + const char *name; + int pc_rel; + bfd_reloc_code_real_type adrp_type; + bfd_reloc_code_real_type movw_type; + bfd_reloc_code_real_type add_type; + bfd_reloc_code_real_type ldst_type; +}; + +static struct reloc_table_entry reloc_table[] = { + /* Low 12 bits of absolute address: ADD/i and LDR/STR */ + {"lo12", 0, + 0, + 0, + BFD_RELOC_AARCH64_ADD_LO12, + BFD_RELOC_AARCH64_LDST_LO12}, + + /* Higher 21 bits of pc-relative page offset: ADRP */ + {"pg_hi21", 1, + BFD_RELOC_AARCH64_ADR_HI21_PCREL, + 0, + 0, + 0}, + + /* Higher 21 bits of pc-relative page offset: ADRP, no check */ + {"pg_hi21_nc", 1, + BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, + 0, + 0, + 0}, + + /* Most significant bits 0-15 of unsigned address/value: MOVZ */ + {"abs_g0", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G0, + 0, + 0}, + + /* Most significant bits 0-15 of signed address/value: MOVN/Z */ + {"abs_g0_s", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G0_S, + 0, + 0}, + + /* Less significant bits 0-15 of address/value: MOVK, no check */ + {"abs_g0_nc", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G0_NC, + 0, + 0}, + + /* Most significant bits 16-31 of unsigned address/value: MOVZ */ + {"abs_g1", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G1, + 0, + 0}, + + /* Most significant bits 16-31 of signed address/value: MOVN/Z */ + {"abs_g1_s", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G1_S, + 0, + 0}, + + /* Less significant bits 16-31 of address/value: MOVK, no check */ + {"abs_g1_nc", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G1_NC, + 0, + 0}, + + /* Most significant bits 32-47 of unsigned address/value: MOVZ */ + {"abs_g2", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G2, + 0, + 0}, + + /* Most significant bits 32-47 of signed address/value: MOVN/Z */ + {"abs_g2_s", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G2_S, + 0, + 0}, + + /* Less significant bits 32-47 of address/value: MOVK, no check */ + {"abs_g2_nc", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G2_NC, + 0, + 0}, + + /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */ + {"abs_g3", 0, + 0, + BFD_RELOC_AARCH64_MOVW_G3, + 0, + 0}, + /* Get to the page containing GOT entry for a symbol. */ + {"got", 1, + BFD_RELOC_AARCH64_ADR_GOT_PAGE, + 0, + 0, + 0}, + /* 12 bit offset into the page containing GOT entry for that symbol. */ + {"got_lo12", 0, + 0, + 0, + 0, + BFD_RELOC_AARCH64_LD64_GOT_LO12_NC}, + + /* Get to the page containing GOT TLS entry for a symbol */ + {"tlsgd", 0, + BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, + 0, + 0, + 0}, + + /* 12 bit offset into the page containing GOT TLS entry for a symbol */ + {"tlsgd_lo12", 0, + 0, + 0, + BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, + 0}, + + /* Get to the page containing GOT TLS entry for a symbol */ + {"tlsdesc", 0, + BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, + 0, + 0, + 0}, + + /* 12 bit offset into the page containing GOT TLS entry for a symbol */ + {"tlsdesc_lo12", 0, + 0, + 0, + BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, + BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC}, + + /* Get to the page containing GOT TLS entry for a symbol */ + {"gottprel", 0, + BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, + 0, + 0, + 0}, + + /* 12 bit offset into the page containing GOT TLS entry for a symbol */ + {"gottprel_lo12", 0, + 0, + 0, + 0, + BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC}, + + /* Get tp offset for a symbol. */ + {"tprel", 0, + 0, + 0, + BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, + 0}, + + /* Get tp offset for a symbol. 
*/ + {"tprel_lo12", 0, + 0, + 0, + BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, + 0}, + + /* Get tp offset for a symbol. */ + {"tprel_hi12", 0, + 0, + 0, + BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, + 0}, + + /* Get tp offset for a symbol. */ + {"tprel_lo12_nc", 0, + 0, + 0, + BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC, + 0}, + + /* Most significant bits 32-47 of address/value: MOVZ. */ + {"tprel_g2", 0, + 0, + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, + 0, + 0}, + + /* Most significant bits 16-31 of address/value: MOVZ. */ + {"tprel_g1", 0, + 0, + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, + 0, + 0}, + + /* Most significant bits 16-31 of address/value: MOVZ, no check. */ + {"tprel_g1_nc", 0, + 0, + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC, + 0, + 0}, + + /* Most significant bits 0-15 of address/value: MOVZ. */ + {"tprel_g0", 0, + 0, + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, + 0, + 0}, + + /* Most significant bits 0-15 of address/value: MOVZ, no check. */ + {"tprel_g0_nc", 0, + 0, + BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC, + 0, + 0}, +}; + +/* Given the address of a pointer pointing to the textual name of a + relocation as may appear in assembler source, attempt to find its + details in reloc_table. The pointer will be updated to the character + after the trailing colon. On failure, NULL will be returned; + otherwise return the reloc_table_entry. */ + +static struct reloc_table_entry * +find_reloc_table_entry (char **str) +{ + unsigned int i; + for (i = 0; i < ARRAY_SIZE (reloc_table); i++) + { + int length = strlen (reloc_table[i].name); + + if (strncasecmp (reloc_table[i].name, *str, length) == 0 + && (*str)[length] == ':') + { + *str += (length + 1); + return &reloc_table[i]; + } + } + + return NULL; +} + +/* Mode argument to parse_shift and parser_shifter_operand. */ +enum parse_shift_mode +{ + SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or + "#imm{,lsl #n}" */ + SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or + "#imm" */ + SHIFTED_LSL, /* bare "lsl #n" */ + SHIFTED_LSL_MSL, /* "lsl|msl #n" */ + SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */ +}; + +/* Parse a operator on an AArch64 data processing instruction. + Return TRUE on success; otherwise return FALSE. 
+
+/* Mode argument to parse_shift and parser_shifter_operand.  */
+enum parse_shift_mode
+{
+  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
+				   "#imm{,lsl #n}"  */
+  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
+				   "#imm"  */
+  SHIFTED_LSL,			/* bare "lsl #n"  */
+  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
+  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
+};
+
+/* Parse a <shift> operator on an AArch64 data processing instruction.
+   Return TRUE on success; otherwise return FALSE.  */
+static bfd_boolean
+parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
+{
+  const struct aarch64_name_value_pair *shift_op;
+  enum aarch64_modifier_kind kind;
+  expressionS exp;
+  int exp_has_prefix;
+  char *s = *str;
+  char *p = s;
+
+  for (p = *str; ISALPHA (*p); p++)
+    ;
+
+  if (p == *str)
+    {
+      set_syntax_error (_("shift expression expected"));
+      return FALSE;
+    }
+
+  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
+
+  if (shift_op == NULL)
+    {
+      set_syntax_error (_("shift operator expected"));
+      return FALSE;
+    }
+
+  kind = aarch64_get_operand_modifier (shift_op);
+
+  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
+    {
+      set_syntax_error (_("invalid use of 'MSL'"));
+      return FALSE;
+    }
+
+  switch (mode)
+    {
+    case SHIFTED_LOGIC_IMM:
+      if (aarch64_extend_operator_p (kind) == TRUE)
+        {
+          set_syntax_error (_("extending shift is not permitted"));
+          return FALSE;
+        }
+      break;
+
+    case SHIFTED_ARITH_IMM:
+      if (kind == AARCH64_MOD_ROR)
+        {
+          set_syntax_error (_("'ROR' shift is not permitted"));
+          return FALSE;
+        }
+      break;
+
+    case SHIFTED_LSL:
+      if (kind != AARCH64_MOD_LSL)
+        {
+          set_syntax_error (_("only 'LSL' shift is permitted"));
+          return FALSE;
+        }
+      break;
+
+    case SHIFTED_REG_OFFSET:
+      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
+          && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
+        {
+          set_fatal_syntax_error
+            (_("invalid shift for the register offset addressing mode"));
+          return FALSE;
+        }
+      break;
+
+    case SHIFTED_LSL_MSL:
+      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
+        {
+          set_syntax_error (_("invalid shift operator"));
+          return FALSE;
+        }
+      break;
+
+    default:
+      abort ();
+    }
+
+  /* Whitespace can appear here if the next thing is a bare digit.  */
+  skip_whitespace (p);
+
+  /* Parse shift amount.  */
+  exp_has_prefix = 0;
+  if (mode == SHIFTED_REG_OFFSET && *p == ']')
+    exp.X_op = O_absent;
+  else
+    {
+      if (is_immediate_prefix (*p))
+        {
+          p++;
+          exp_has_prefix = 1;
+        }
+      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
+    }
+  if (exp.X_op == O_absent)
+    {
+      if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
+        {
+          set_syntax_error (_("missing shift amount"));
+          return FALSE;
+        }
+      operand->shifter.amount = 0;
+    }
+  else if (exp.X_op != O_constant)
+    {
+      set_syntax_error (_("constant shift amount required"));
+      return FALSE;
+    }
+  else if (exp.X_add_number < 0 || exp.X_add_number > 63)
+    {
+      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
+      return FALSE;
+    }
+  else
+    {
+      operand->shifter.amount = exp.X_add_number;
+      operand->shifter.amount_present = 1;
+    }
+
+  operand->shifter.operator_present = 1;
+  operand->shifter.kind = kind;
+
+  *str = p;
+  return TRUE;
+}
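+
+/* A worked example (illustrative): for the input "lsl #3" parsed in
+   mode SHIFTED_REG_OFFSET, the function above leaves
+   operand->shifter.kind == AARCH64_MOD_LSL and
+   operand->shifter.amount == 3, as in the address [x1, x2, lsl #3].  */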
+
+/* Parse a <shifter_operand> for a data processing instruction:
+
+      #<immediate>
+      #<immediate>, LSL #imm
+
+   Validation of immediate operands is deferred to md_apply_fix.
+
+   Return TRUE on success; otherwise return FALSE.  */
+
+static bfd_boolean
+parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
+                           enum parse_shift_mode mode)
+{
+  char *p;
+
+  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
+    return FALSE;
+
+  p = *str;
+
+  /* Accept an immediate expression.  */
+  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
+    return FALSE;
+
+  /* Accept optional LSL for arithmetic immediate values.  */
+  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
+    if (! parse_shift (&p, operand, SHIFTED_LSL))
+      return FALSE;
+
+  /* Do not accept any shifter for logical immediate values.  */
+  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
+      && parse_shift (&p, operand, mode))
+    {
+      set_syntax_error (_("unexpected shift operator"));
+      return FALSE;
+    }
+
+  *str = p;
+  return TRUE;
+}
+
+/* Parse a <shifter_operand> for a data processing instruction:
+
+      <Rm>
+      <Rm>, <shift>
+      #<immediate>
+      #<immediate>, LSL #imm
+
+   where <shift> is handled by parse_shift above, and the last two
+   cases are handled by the function above.
+
+   Validation of immediate operands is deferred to md_apply_fix.
+
+   Return TRUE on success; otherwise return FALSE.  */
+
+static bfd_boolean
+parse_shifter_operand (char **str, aarch64_opnd_info *operand,
+                       enum parse_shift_mode mode)
+{
+  int reg;
+  int isreg32, isregzero;
+  enum aarch64_operand_class opd_class
+    = aarch64_get_operand_class (operand->type);
+
+  if ((reg =
+       aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
+    {
+      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
+        {
+          set_syntax_error (_("unexpected register in the immediate operand"));
+          return FALSE;
+        }
+
+      if (!isregzero && reg == REG_SP)
+        {
+          set_syntax_error (BAD_SP);
+          return FALSE;
+        }
+
+      operand->reg.regno = reg;
+      operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
+
+      /* Accept optional shift operation on register.  */
+      if (! skip_past_comma (str))
+        return TRUE;
+
+      if (! parse_shift (str, operand, mode))
+        return FALSE;
+
+      return TRUE;
+    }
+  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
+    {
+      set_syntax_error
+        (_("integer register expected in the extended/shifted operand "
+           "register"));
+      return FALSE;
+    }
+
+  /* We have a shifted immediate variable.  */
+  return parse_shifter_operand_imm (str, operand, mode);
+}
+
+/* Return TRUE on success; return FALSE otherwise.  */
+
+static bfd_boolean
+parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
+                             enum parse_shift_mode mode)
+{
+  char *p = *str;
+
+  /* Determine if we have the sequence of characters #: or just :
+     coming next.  If we do, then we check for a :rello: relocation
+     modifier.  If we don't, punt the whole lot to
+     parse_shifter_operand.  */
+
+  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
+    {
+      struct reloc_table_entry *entry;
+
+      if (p[0] == '#')
+        p += 2;
+      else
+        p++;
+      *str = p;
+
+      /* Try to parse a relocation.  Anything else is an error.  */
+      if (!(entry = find_reloc_table_entry (str)))
+        {
+          set_syntax_error (_("unknown relocation modifier"));
+          return FALSE;
+        }
+
+      if (entry->add_type == 0)
+        {
+          set_syntax_error
+            (_("this relocation modifier is not allowed on this instruction"));
+          return FALSE;
+        }
+
+      /* Save str before we decompose it.  */
+      p = *str;
+
+      /* Next, we parse the expression.  */
+      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
+        return FALSE;
+
+      /* Record the relocation type (use the ADD variant here).  */
+      inst.reloc.type = entry->add_type;
+      inst.reloc.pc_rel = entry->pc_rel;
+
+      /* If str is empty, we've reached the end, stop here.  */
+      if (**str == '\0')
+        return TRUE;
+
+      /* Otherwise, we have a shifted reloc modifier, so rewind to
+         recover the variable name and continue parsing for the shifter.  */
+      *str = p;
+      return parse_shifter_operand_imm (str, operand, mode);
+    }
+
+  return parse_shifter_operand (str, operand, mode);
+}
+
+/* Parse all forms of an address expression.  Information is written
+   to *OPERAND and/or inst.reloc.
+
+   The A64 instruction set has the following addressing modes:
+
+   Offset
+     [base]			// in SIMD ld/st structure
+     [base{,#0}]		// in ld/st exclusive
+     [base{,#imm}]
+     [base,Xm{,LSL #imm}]
+     [base,Xm,SXTX {#imm}]
+     [base,Wm,(S|U)XTW {#imm}]
+   Pre-indexed
+     [base,#imm]!
+   Post-indexed
+     [base],#imm
+     [base],Xm			// in SIMD ld/st structure
+   PC-relative (literal)
+     label
+     =immediate
+
+   (As a convenience, the notation "=immediate" is permitted in conjunction
+   with the pc-relative literal load instructions to automatically place an
+   immediate value or symbolic address in a nearby literal pool and generate
+   a hidden label which references it.)
+
+   Upon a successful parsing, the address structure in *OPERAND will be
+   filled in the following way:
+
+     .base_regno = <base>
+     .offset.is_reg	// 1 if the offset is a register
+     .offset.imm = <imm>
+     .offset.regno = <Rm>
+
+   For different addressing modes defined in the A64 ISA:
+
+   Offset
+     .pcrel=0; .preind=1; .postind=0; .writeback=0
+   Pre-indexed
+     .pcrel=0; .preind=1; .postind=0; .writeback=1
+   Post-indexed
+     .pcrel=0; .preind=0; .postind=1; .writeback=1
+   PC-relative (literal)
+     .pcrel=1; .preind=1; .postind=0; .writeback=0
+
+   The shift/extension information, if any, will be stored in .shifter.
+
+   It is the caller's responsibility to check for addressing modes not
+   supported by the instruction, and to set inst.reloc.type.  */
+
+static bfd_boolean
+parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
+                    int accept_reg_post_index)
+{
+  char *p = *str;
+  int reg;
+  int isreg32, isregzero;
+  expressionS *exp = &inst.reloc.exp;
+
+  if (! skip_past_char (&p, '['))
+    {
+      /* =immediate or label.  */
+      operand->addr.pcrel = 1;
+      operand->addr.preind = 1;
+
+      if (skip_past_char (&p, '='))
+        /* =immediate; need to generate the literal in the literal pool.  */
+        inst.gen_lit_pool = 1;
+
+      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
+        {
+          set_syntax_error (_("invalid address"));
+          return FALSE;
+        }
+
+      *str = p;
+      return TRUE;
+    }
+
+  /* [ */
+
+  /* Accept SP and reject ZR  */
+  reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
+  if (reg == PARSE_FAIL || isreg32)
+    {
+      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
+      return FALSE;
+    }
+  operand->addr.base_regno = reg;
+
+  /* [Xn */
+  if (skip_past_comma (&p))
+    {
+      /* [Xn, */
+      operand->addr.preind = 1;
+
+      /* Reject SP and accept ZR  */
+      reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
+      if (reg != PARSE_FAIL)
+        {
+          /* [Xn,Rm */
+          operand->addr.offset.regno = reg;
+          operand->addr.offset.is_reg = 1;
+          /* Shifted index.  */
+          if (skip_past_comma (&p))
+            {
+              /* [Xn,Rm, */
+              if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
+                /* Use the diagnostics set in parse_shift, so do not set a
+                   new error message here.  */
+                return FALSE;
+            }
+          /* We only accept:
+             [base,Xm{,LSL #imm}]
+             [base,Xm,SXTX {#imm}]
+             [base,Wm,(S|U)XTW {#imm}]  */
+          if (operand->shifter.kind == AARCH64_MOD_NONE
+              || operand->shifter.kind == AARCH64_MOD_LSL
+              || operand->shifter.kind == AARCH64_MOD_SXTX)
+            {
+              if (isreg32)
+                {
+                  set_syntax_error (_("invalid use of 32-bit register offset"));
+                  return FALSE;
+                }
+            }
+          else if (!isreg32)
+            {
+              set_syntax_error (_("invalid use of 64-bit register offset"));
+              return FALSE;
+            }
+        }
+      else
+        {
+          /* [Xn,#:<reloc_op>:<symbol>  */
+          skip_past_char (&p, '#');
+          if (reloc && skip_past_char (&p, ':'))
+            {
+              struct reloc_table_entry *entry;
+
+              /* Try to parse a relocation modifier.  Anything else is
+                 an error.  */
+              if (!(entry = find_reloc_table_entry (&p)))
+                {
+                  set_syntax_error (_("unknown relocation modifier"));
+                  return FALSE;
+                }
+
+              if (entry->ldst_type == 0)
+                {
+                  set_syntax_error
+                    (_("this relocation modifier is not allowed on this "
+                       "instruction"));
+                  return FALSE;
+                }
+
+              /* [Xn,#:<reloc_op>:  */
+              /* We now have the group relocation table entry corresponding to
+                 the name in the assembler source.  Next, we parse the
+                 expression.  */
+              if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
+                {
+                  set_syntax_error (_("invalid relocation expression"));
+                  return FALSE;
+                }
+
+              /* [Xn,#:<reloc_op>:<expr>  */
+              /* Record the load/store relocation type.  */
+              inst.reloc.type = entry->ldst_type;
+              inst.reloc.pc_rel = entry->pc_rel;
+            }
+          else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+            {
+              set_syntax_error (_("invalid expression in the address"));
+              return FALSE;
+            }
+          /* [Xn,<expr>  */
+        }
+    }
+
+  if (! skip_past_char (&p, ']'))
+    {
+      set_syntax_error (_("']' expected"));
+      return FALSE;
+    }
+
+  if (skip_past_char (&p, '!'))
+    {
+      if (operand->addr.preind && operand->addr.offset.is_reg)
+        {
+          set_syntax_error (_("register offset not allowed in pre-indexed "
+                              "addressing mode"));
+          return FALSE;
+        }
+      /* [Xn]! */
+      operand->addr.writeback = 1;
+    }
+  else if (skip_past_comma (&p))
+    {
+      /* [Xn], */
+      operand->addr.postind = 1;
+      operand->addr.writeback = 1;
+
+      if (operand->addr.preind)
+        {
+          set_syntax_error (_("cannot combine pre- and post-indexing"));
+          return FALSE;
+        }
+
+      if (accept_reg_post_index
+          && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
+                                             &isregzero)) != PARSE_FAIL)
+        {
+          /* [Xn],Xm */
+          if (isreg32)
+            {
+              set_syntax_error (_("invalid 32-bit register offset"));
+              return FALSE;
+            }
+          operand->addr.offset.regno = reg;
+          operand->addr.offset.is_reg = 1;
+        }
+      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+        {
+          /* [Xn],#expr */
+          set_syntax_error (_("invalid expression in the address"));
+          return FALSE;
+        }
+    }
+
+  /* If at this point neither .preind nor .postind is set, we have a
+     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for
+     [Rn,#0].  */
+  if (operand->addr.preind == 0 && operand->addr.postind == 0)
+    {
+      if (operand->addr.writeback)
+        {
+          /* Reject [Rn]!  */
+          set_syntax_error (_("missing offset in the pre-indexed address"));
+          return FALSE;
+        }
+      operand->addr.preind = 1;
+      inst.reloc.exp.X_op = O_constant;
+      inst.reloc.exp.X_add_number = 0;
+    }
+
+  *str = p;
+  return TRUE;
+}
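+
+/* A worked example (illustrative): parsing "[x1, #8]!" with the
+   function above yields .base_regno == 1, .preind == 1 and
+   .writeback == 1, with inst.reloc.exp holding the constant 8 (the
+   remaining addr flags stay clear).  */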
+              /* Try to parse a relocation modifier.  Anything else is
+                 an error.  */
+              if (!(entry = find_reloc_table_entry (&p)))
+                {
+                  set_syntax_error (_("unknown relocation modifier"));
+                  return FALSE;
+                }
+
+              if (entry->ldst_type == 0)
+                {
+                  set_syntax_error
+                    (_("this relocation modifier is not allowed on this "
+                       "instruction"));
+                  return FALSE;
+                }
+
+              /* [Xn,#:<reloc_op>:  */
+              /* We now have the group relocation table entry corresponding to
+                 the name in the assembler source.  Next, we parse the
+                 expression.  */
+              if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
+                {
+                  set_syntax_error (_("invalid relocation expression"));
+                  return FALSE;
+                }
+
+              /* [Xn,#:<reloc_op>:<expr>  */
+              /* Record the load/store relocation type.  */
+              inst.reloc.type = entry->ldst_type;
+              inst.reloc.pc_rel = entry->pc_rel;
+            }
+          else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+            {
+              set_syntax_error (_("invalid expression in the address"));
+              return FALSE;
+            }
+          /* [Xn,<expr>  */
+        }
+    }
+
+  if (! skip_past_char (&p, ']'))
+    {
+      set_syntax_error (_("']' expected"));
+      return FALSE;
+    }
+
+  if (skip_past_char (&p, '!'))
+    {
+      if (operand->addr.preind && operand->addr.offset.is_reg)
+        {
+          set_syntax_error (_("register offset not allowed in pre-indexed "
+                              "addressing mode"));
+          return FALSE;
+        }
+      /* [Xn]!  */
+      operand->addr.writeback = 1;
+    }
+  else if (skip_past_comma (&p))
+    {
+      /* [Xn],  */
+      operand->addr.postind = 1;
+      operand->addr.writeback = 1;
+
+      if (operand->addr.preind)
+        {
+          set_syntax_error (_("cannot combine pre- and post-indexing"));
+          return FALSE;
+        }
+
+      if (accept_reg_post_index
+          && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
+                                             &isregzero)) != PARSE_FAIL)
+        {
+          /* [Xn],Xm  */
+          if (isreg32)
+            {
+              set_syntax_error (_("invalid 32-bit register offset"));
+              return FALSE;
+            }
+          operand->addr.offset.regno = reg;
+          operand->addr.offset.is_reg = 1;
+        }
+      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+        {
+          /* [Xn],#expr  */
+          set_syntax_error (_("invalid expression in the address"));
+          return FALSE;
+        }
+    }
+
+  /* If at this point neither .preind nor .postind is set, we have a
+     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
+  if (operand->addr.preind == 0 && operand->addr.postind == 0)
+    {
+      if (operand->addr.writeback)
+        {
+          /* Reject [Rn]!  */
+          set_syntax_error (_("missing offset in the pre-indexed address"));
+          return FALSE;
+        }
+      operand->addr.preind = 1;
+      inst.reloc.exp.X_op = O_constant;
+      inst.reloc.exp.X_add_number = 0;
+    }
+
+  *str = p;
+  return TRUE;
+}
+
+/* Return TRUE on success; otherwise return FALSE.  */
+static bfd_boolean
+parse_address (char **str, aarch64_opnd_info *operand,
+               int accept_reg_post_index)
+{
+  return parse_address_main (str, operand, 0, accept_reg_post_index);
+}
+
+/* Return TRUE on success; otherwise return FALSE.  */
+static bfd_boolean
+parse_address_reloc (char **str, aarch64_opnd_info *operand)
+{
+  return parse_address_main (str, operand, 1, 0);
+}
+
+/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
+   Return TRUE on success; otherwise return FALSE.  */
+static bfd_boolean
+parse_half (char **str, int *internal_fixup_p)
+{
+  char *p, *saved;
+  int dummy;
+
+  p = *str;
+  skip_past_char (&p, '#');
+
+  gas_assert (internal_fixup_p);
+  *internal_fixup_p = 0;
+
+  if (*p == ':')
+    {
+      struct reloc_table_entry *entry;
+
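+      /* For example, an operand written as
+           movz x0, #:abs_g1:sym
+         arrives here with ":abs_g1:sym" left to parse; the modifier name
+         is matched against the movw_type entries in reloc_table (abs_g1
+         is one such MOVZ/MOVK modifier).  */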
+      /* Try to parse a relocation.  Anything else is an error.  */
+      ++p;
+      if (!(entry = find_reloc_table_entry (&p)))
+        {
+          set_syntax_error (_("unknown relocation modifier"));
+          return FALSE;
+        }
+
+      if (entry->movw_type == 0)
+        {
+          set_syntax_error
+            (_("this relocation modifier is not allowed on this instruction"));
+          return FALSE;
+        }
+
+      inst.reloc.type = entry->movw_type;
+    }
+  else
+    *internal_fixup_p = 1;
+
+  /* Avoid parsing a register as a general symbol.  */
+  saved = p;
+  if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
+    return FALSE;
+  p = saved;
+
+  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
+    return FALSE;
+
+  *str = p;
+  return TRUE;
+}
+
+/* Parse an operand for an ADRP instruction:
+     ADRP <Xd>, <label>