1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non-zero, the C data types int and long are 32-bit objects, but
102 pointers are 64-bit objects (LLP64). */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
122 struct vector_type_el
123 {
124 enum vector_el_type type;
125 unsigned char defined;
126 unsigned element_size;
127 unsigned width;
128 int64_t index;
129 };
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
133 struct reloc
134 {
135 bfd_reloc_code_real_type type;
136 expressionS exp;
137 int pc_rel;
138 enum aarch64_opnd opnd;
139 uint32_t flags;
140 unsigned need_libopcodes_p : 1;
141 };
142
143 struct aarch64_instruction
144 {
145 /* libopcodes structure for instruction intermediate representation. */
146 aarch64_inst base;
147 /* Record assembly errors found during the parsing. */
148 aarch64_operand_error parsing_error;
149 /* The condition that appears in the assembly line. */
150 int cond;
151 /* Relocation information (including the GAS internal fixup). */
152 struct reloc reloc;
153 /* Need to generate an immediate in the literal pool. */
154 unsigned gen_lit_pool : 1;
155 };
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
165 data fields contain the following information:
166
167 data[0].i:
168 A mask of register types that would have been acceptable as bare
169 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
170 is set if a general parsing error occurred for an operand (that is,
171 an error not related to registers, and having no error string).
172
173 data[1].i:
174 A mask of register types that would have been acceptable inside
175 a register list. In addition, SEF_IN_REGLIST is set if the
176 operand contained a '{' and if we got to the point of trying
177 to parse a register inside a list.
178
179 data[2].i:
180 The mask associated with the register that was actually seen, or 0
181 if none. A nonzero value describes a register inside a register
182 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
183 register.
184
185 The idea is that stringless errors from multiple opcode templates can
186 be ORed together to give a summary of the available alternatives. */
187 #define SEF_DEFAULT_ERROR (1U << 31)
188 #define SEF_IN_REGLIST (1U << 31)
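/* For instance, if one template for a mnemonic expects a general register
   for operand 1 and another expects an SVE vector register, and neither
   matches, the two stringless syntax errors can be summarized as roughly

     data[0].i = reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_ZN];

   which get_reg_expected_msg below maps to a single diagnostic such as
   "expected an integer register or SVE vector register at operand 1".
   (Illustrative sketch only; the actual combining is performed by the
   error-reporting code later in this file.)  */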
189
190 /* Diagnostics inline function utilities.
191
192 These are lightweight utilities which should only be called by parse_operands
193 and other parsers. GAS processes each assembly line by parsing it against
194 instruction template(s); in the case of multiple templates (for the same
195 mnemonic name), those templates are tried one by one until one succeeds or
196 all fail. An assembly line may fail a few templates before being
197 successfully parsed; an error saved here in most cases is not a user error
198 but an error indicating the current template is not the right template.
199 Therefore it is very important that errors can be saved at a low cost during
200 the parsing; we don't want to slow down the whole parsing by recording
201 non-user errors in detail.
202
203 Remember that the objective is to help GAS pick up the most appropriate
204 error message in the case of multiple templates, e.g. FMOV which has 8
205 templates. */
206
207 static inline void
208 clear_error (void)
209 {
210 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
211 inst.parsing_error.kind = AARCH64_OPDE_NIL;
212 }
213
214 static inline bool
215 error_p (void)
216 {
217 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
218 }
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
224 inst.parsing_error.index = -1;
225 inst.parsing_error.kind = kind;
226 inst.parsing_error.error = error;
227 }
228
229 static inline void
230 set_recoverable_error (const char *error)
231 {
232 set_error (AARCH64_OPDE_RECOVERABLE, error);
233 }
234
235 /* Use the DESC field of the corresponding aarch64_operand entry to compose
236 the error message. */
237 static inline void
238 set_default_error (void)
239 {
240 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
241 inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
242 }
243
244 static inline void
245 set_expected_error (unsigned int flags)
246 {
247 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
248 inst.parsing_error.data[0].i = flags;
249 }
250
251 static inline void
252 set_syntax_error (const char *error)
253 {
254 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
255 }
256
257 static inline void
258 set_first_syntax_error (const char *error)
259 {
260 if (! error_p ())
261 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
262 }
263
264 static inline void
265 set_fatal_syntax_error (const char *error)
266 {
267 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
268 }
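/* As an illustrative sketch only (not one of the real parsers below), a
   helper that insists on a literal '#' prefix might use these utilities
   like so:

     static bool
     require_immediate_prefix (char **str)
     {
       if (!is_immediate_prefix (**str))
         {
           set_syntax_error (_("immediate operand required"));
           return false;
         }
       (*str)++;
       return true;
     }

   Recording a failure this way is cheap; it only becomes a user-visible
   diagnostic if every template for the mnemonic fails.  */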
269 \f
270 /* Return value for certain parsers when the parsing fails; those parsers
271 return the information of the parsed result, e.g. register number, on
272 success. */
273 #define PARSE_FAIL -1
274
275 /* This is an invalid condition code that means no conditional field is
276 present. */
277 #define COND_ALWAYS 0x10
278
279 typedef struct
280 {
281 const char *template;
282 uint32_t value;
283 } asm_nzcv;
284
285 struct reloc_entry
286 {
287 char *name;
288 bfd_reloc_code_real_type reloc;
289 };
290
291 /* Macros to define the register types and masks for the purpose
292 of parsing. */
293
294 #undef AARCH64_REG_TYPES
295 #define AARCH64_REG_TYPES \
296 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
297 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
298 BASIC_REG_TYPE(SP_32) /* wsp */ \
299 BASIC_REG_TYPE(SP_64) /* sp */ \
300 BASIC_REG_TYPE(Z_32) /* wzr */ \
301 BASIC_REG_TYPE(Z_64) /* xzr */ \
302 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
303 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
304 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
305 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
306 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
307 BASIC_REG_TYPE(VN) /* v[0-31] */ \
308 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
309 BASIC_REG_TYPE(PN) /* p[0-15] */ \
310 BASIC_REG_TYPE(ZA) /* za */ \
311 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
312 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
313 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
314 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
315 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
316 /* Typecheck: same, plus SVE registers. */ \
317 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
318 | REG_TYPE(ZN)) \
319 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
320 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
322 /* Typecheck: same, plus SVE registers. */ \
323 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
324 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
325 | REG_TYPE(ZN)) \
326 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
327 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
329 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
330 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
331 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
332 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
333 /* Typecheck: any [BHSDQ]P FP. */ \
334 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
335 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
336 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
337 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
338 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
339 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
340 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
341 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
342 be used for SVE instructions, since Zn and Pn are valid symbols \
343 in other contexts. */ \
344 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
345 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
346 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
347 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
348 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
349 | REG_TYPE(ZN) | REG_TYPE(PN)) \
350 /* Any integer register; used for error messages only. */ \
351 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
352 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
353 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
354 /* Any vector register. */ \
355 MULTI_REG_TYPE(VZ, REG_TYPE(VN) | REG_TYPE(ZN)) \
356 /* An SVE vector or predicate register. */ \
357 MULTI_REG_TYPE(ZP, REG_TYPE(ZN) | REG_TYPE(PN)) \
358 /* Any vector or predicate register. */ \
359 MULTI_REG_TYPE(VZP, REG_TYPE(VN) | REG_TYPE(ZN) | REG_TYPE(PN)) \
360 /* The whole of ZA or a single tile. */ \
361 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
362 /* A horizontal or vertical slice of a ZA tile. */ \
363 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
364 /* Pseudo type to mark the end of the enumerator sequence. */ \
365 END_REG_TYPE(MAX)
366
367 #undef BASIC_REG_TYPE
368 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
369 #undef MULTI_REG_TYPE
370 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
371 #undef END_REG_TYPE
372 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
373
374 /* Register type enumerators. */
375 typedef enum aarch64_reg_type_
376 {
377 /* A list of REG_TYPE_*. */
378 AARCH64_REG_TYPES
379 } aarch64_reg_type;
380
381 #undef BASIC_REG_TYPE
382 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
383 #undef REG_TYPE
384 #define REG_TYPE(T) (1 << REG_TYPE_##T)
385 #undef MULTI_REG_TYPE
386 #define MULTI_REG_TYPE(T,V) V,
387 #undef END_REG_TYPE
388 #define END_REG_TYPE(T) 0
389
390 /* Structure for a hash table entry for a register. */
391 typedef struct
392 {
393 const char *name;
394 unsigned char number;
395 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
396 unsigned char builtin;
397 } reg_entry;
398
399 /* Values indexed by aarch64_reg_type to assist the type checking. */
400 static const unsigned reg_type_masks[] =
401 {
402 AARCH64_REG_TYPES
403 };
404
405 #undef BASIC_REG_TYPE
406 #undef REG_TYPE
407 #undef MULTI_REG_TYPE
408 #undef END_REG_TYPE
409 #undef AARCH64_REG_TYPES
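/* As a concrete example of the macro machinery above, the reg_type_masks
   entry generated for REG_TYPE_R_SP expands (after the second round of
   macro definitions) to

     (1 << REG_TYPE_R_32) | (1 << REG_TYPE_R_64)
     | (1 << REG_TYPE_SP_32) | (1 << REG_TYPE_SP_64)

   so reg_type_masks[REG_TYPE_R_SP] accepts w0-w30, x0-x30, wsp and sp.  */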
410
411 /* We expected one of the registers in MASK to be specified. If a register
412 of some kind was specified, SEEN is a mask that contains that register,
413 otherwise it is zero.
414
415 If it is possible to provide a relatively pithy message that describes
416 the error exactly, return a string that does so, reporting the error
417 against "operand %d". Return null otherwise.
418
419 From a QoI perspective, any REG_TYPE_* that is passed as the first
420 argument to set_expected_reg_error should generally have its own message.
421 Providing messages for combinations of such REG_TYPE_*s can be useful if
422 it is possible to summarize the combination in a relatively natural way.
423 On the other hand, it seems better to avoid long lists of unrelated
424 things. */
425
426 static const char *
427 get_reg_expected_msg (unsigned int mask, unsigned int seen)
428 {
429 /* First handle messages that use SEEN. */
430 if ((mask & reg_type_masks[REG_TYPE_ZAT])
431 && (seen & reg_type_masks[REG_TYPE_ZATHV]))
432 return N_("expected an unsuffixed ZA tile at operand %d");
433
434 if ((mask & reg_type_masks[REG_TYPE_ZATHV])
435 && (seen & reg_type_masks[REG_TYPE_ZAT]))
436 return N_("missing horizontal or vertical suffix at operand %d");
437
438 if ((mask & reg_type_masks[REG_TYPE_ZA])
439 && (seen & (reg_type_masks[REG_TYPE_ZAT]
440 | reg_type_masks[REG_TYPE_ZATHV])))
441 return N_("expected 'za' rather than a ZA tile at operand %d");
442
443 /* Integer, zero and stack registers. */
444 if (mask == reg_type_masks[REG_TYPE_R_64])
445 return N_("expected a 64-bit integer register at operand %d");
446 if (mask == reg_type_masks[REG_TYPE_R_Z])
447 return N_("expected an integer or zero register at operand %d");
448 if (mask == reg_type_masks[REG_TYPE_R_SP])
449 return N_("expected an integer or stack pointer register at operand %d");
450
451 /* Floating-point and SIMD registers. */
452 if (mask == reg_type_masks[REG_TYPE_BHSDQ])
453 return N_("expected a scalar SIMD or floating-point register"
454 " at operand %d");
455 if (mask == reg_type_masks[REG_TYPE_VN])
456 return N_("expected an Advanced SIMD vector register at operand %d");
457 if (mask == reg_type_masks[REG_TYPE_ZN])
458 return N_("expected an SVE vector register at operand %d");
459 if (mask == reg_type_masks[REG_TYPE_PN])
460 return N_("expected an SVE predicate register at operand %d");
461 if (mask == reg_type_masks[REG_TYPE_VZ])
462 return N_("expected a vector register at operand %d");
463 if (mask == reg_type_masks[REG_TYPE_ZP])
464 return N_("expected an SVE vector or predicate register at operand %d");
465 if (mask == reg_type_masks[REG_TYPE_VZP])
466 return N_("expected a vector or predicate register at operand %d");
467
468 /* ZA-related registers. */
469 if (mask == reg_type_masks[REG_TYPE_ZA])
470 return N_("expected a ZA array vector at operand %d");
471 if (mask == reg_type_masks[REG_TYPE_ZA_ZAT])
472 return N_("expected 'za' or a ZA tile at operand %d");
473 if (mask == reg_type_masks[REG_TYPE_ZAT])
474 return N_("expected a ZA tile at operand %d");
475 if (mask == reg_type_masks[REG_TYPE_ZATHV])
476 return N_("expected a ZA tile slice at operand %d");
477
478 /* Integer and vector combos. */
479 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VN]))
480 return N_("expected an integer register or Advanced SIMD vector register"
481 " at operand %d");
482 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_ZN]))
483 return N_("expected an integer register or SVE vector register"
484 " at operand %d");
485 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZ]))
486 return N_("expected an integer or vector register at operand %d");
487 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_PN]))
488 return N_("expected an integer or predicate register at operand %d");
489 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZP]))
490 return N_("expected an integer, vector or predicate register"
491 " at operand %d");
492
493 /* SVE and SME combos. */
494 if (mask == (reg_type_masks[REG_TYPE_ZN] | reg_type_masks[REG_TYPE_ZATHV]))
495 return N_("expected an SVE vector register or ZA tile slice"
496 " at operand %d");
497
498 return NULL;
499 }
500
501 /* Record that we expected a register of type TYPE but didn't see one.
502 REG is the register that we actually saw, or null if we didn't see a
503 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
504 contents of a register list, otherwise it is zero. */
505
506 static inline void
507 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
508 unsigned int flags)
509 {
510 assert (flags == 0 || flags == SEF_IN_REGLIST);
511 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
512 if (flags & SEF_IN_REGLIST)
513 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
514 else
515 inst.parsing_error.data[0].i = reg_type_masks[type];
516 if (reg)
517 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
518 }
519
520 /* Record that we expected a register list containing registers of type TYPE,
521 but didn't see the opening '{'. If we saw a register instead, REG is the
522 register that we saw, otherwise it is null. */
523
524 static inline void
525 set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
526 {
527 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
528 inst.parsing_error.data[1].i = reg_type_masks[type];
529 if (reg)
530 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
531 }
532
533 /* Some well known registers that we refer to directly elsewhere. */
534 #define REG_SP 31
535 #define REG_ZR 31
536
537 /* Instructions take 4 bytes in the object file. */
538 #define INSN_SIZE 4
539
540 static htab_t aarch64_ops_hsh;
541 static htab_t aarch64_cond_hsh;
542 static htab_t aarch64_shift_hsh;
543 static htab_t aarch64_sys_regs_hsh;
544 static htab_t aarch64_pstatefield_hsh;
545 static htab_t aarch64_sys_regs_ic_hsh;
546 static htab_t aarch64_sys_regs_dc_hsh;
547 static htab_t aarch64_sys_regs_at_hsh;
548 static htab_t aarch64_sys_regs_tlbi_hsh;
549 static htab_t aarch64_sys_regs_sr_hsh;
550 static htab_t aarch64_reg_hsh;
551 static htab_t aarch64_barrier_opt_hsh;
552 static htab_t aarch64_nzcv_hsh;
553 static htab_t aarch64_pldop_hsh;
554 static htab_t aarch64_hint_opt_hsh;
555
556 /* Stuff needed to resolve the label ambiguity
557 As:
558 ...
559 label: <insn>
560 may differ from:
561 ...
562 label:
563 <insn> */
564
565 static symbolS *last_label_seen;
566
567 /* Literal pool structure. Held on a per-section
568 and per-sub-section basis. */
569
570 #define MAX_LITERAL_POOL_SIZE 1024
571 typedef struct literal_expression
572 {
573 expressionS exp;
574 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
575 LITTLENUM_TYPE * bignum;
576 } literal_expression;
577
578 typedef struct literal_pool
579 {
580 literal_expression literals[MAX_LITERAL_POOL_SIZE];
581 unsigned int next_free_entry;
582 unsigned int id;
583 symbolS *symbol;
584 segT section;
585 subsegT sub_section;
586 int size;
587 struct literal_pool *next;
588 } literal_pool;
589
590 /* Pointer to a linked list of literal pools. */
591 static literal_pool *list_of_pools = NULL;
592 \f
593 /* Pure syntax. */
594
595 /* This array holds the chars that always start a comment. If the
596 pre-processor is disabled, these aren't very useful. */
597 const char comment_chars[] = "";
598
599 /* This array holds the chars that only start a comment at the beginning of
600 a line. If the line seems to have the form '# 123 filename'
601 .line and .file directives will appear in the pre-processed output. */
602 /* Note that input_file.c hand checks for '#' at the beginning of the
603 first line of the input file. This is because the compiler outputs
604 #NO_APP at the beginning of its output. */
605 /* Also note that comments like this one will always work. */
606 const char line_comment_chars[] = "#";
607
608 const char line_separator_chars[] = ";";
609
610 /* Chars that can be used to separate the mantissa
611 from the exponent in floating point numbers. */
612 const char EXP_CHARS[] = "eE";
613
614 /* Chars that mean this number is a floating point constant. */
615 /* As in 0f12.456 */
616 /* or 0d1.2345e12 */
617
618 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
619
620 /* Prefix character that indicates the start of an immediate value. */
621 #define is_immediate_prefix(C) ((C) == '#')
622
623 /* Separator character handling. */
624
625 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
626
627 static inline bool
628 skip_past_char (char **str, char c)
629 {
630 if (**str == c)
631 {
632 (*str)++;
633 return true;
634 }
635 else
636 return false;
637 }
638
639 #define skip_past_comma(str) skip_past_char (str, ',')
640
641 /* Arithmetic expressions (possibly involving symbols). */
642
643 static bool in_aarch64_get_expression = false;
644
645 /* Third argument to aarch64_get_expression. */
646 #define GE_NO_PREFIX false
647 #define GE_OPT_PREFIX true
648
649 /* Fourth argument to aarch64_get_expression. */
650 #define ALLOW_ABSENT false
651 #define REJECT_ABSENT true
652
653 /* Return TRUE if the string pointed to by *STR is successfully parsed
654 as a valid expression; *EP will be filled with the information of
655 such an expression. Otherwise return FALSE.
656
657 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
658 If REJECT_ABSENT is true then treat missing expressions as an error. */
659
660 static bool
661 aarch64_get_expression (expressionS * ep,
662 char ** str,
663 bool allow_immediate_prefix,
664 bool reject_absent)
665 {
666 char *save_in;
667 segT seg;
668 bool prefix_present = false;
669
670 if (allow_immediate_prefix)
671 {
672 if (is_immediate_prefix (**str))
673 {
674 (*str)++;
675 prefix_present = true;
676 }
677 }
678
679 memset (ep, 0, sizeof (expressionS));
680
681 save_in = input_line_pointer;
682 input_line_pointer = *str;
683 in_aarch64_get_expression = true;
684 seg = expression (ep);
685 in_aarch64_get_expression = false;
686
687 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
688 {
689 /* We found a bad expression in md_operand(). */
690 *str = input_line_pointer;
691 input_line_pointer = save_in;
692 if (prefix_present && ! error_p ())
693 set_fatal_syntax_error (_("bad expression"));
694 else
695 set_first_syntax_error (_("bad expression"));
696 return false;
697 }
698
699 #ifdef OBJ_AOUT
700 if (seg != absolute_section
701 && seg != text_section
702 && seg != data_section
703 && seg != bss_section
704 && seg != undefined_section)
705 {
706 set_syntax_error (_("bad segment"));
707 *str = input_line_pointer;
708 input_line_pointer = save_in;
709 return false;
710 }
711 #else
712 (void) seg;
713 #endif
714
715 *str = input_line_pointer;
716 input_line_pointer = save_in;
717 return true;
718 }
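/* For example (illustrative only), an operand parser that accepts an
   optional '#' before a required constant would call:

     expressionS exp;
     if (!aarch64_get_expression (&exp, &str, GE_OPT_PREFIX, REJECT_ABSENT))
       return false;

   so that both "#16" and "16" are accepted, while a missing expression is
   reported as an error.  */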
719
720 /* Turn a string in input_line_pointer into a floating point constant
721 of type TYPE, and store the appropriate bytes in *LITP. The number
722 of LITTLENUMS emitted is stored in *SIZEP. An error message is
723 returned, or NULL on OK. */
724
725 const char *
726 md_atof (int type, char *litP, int *sizeP)
727 {
728 return ieee_md_atof (type, litP, sizeP, target_big_endian);
729 }
730
731 /* We handle all bad expressions here, so that we can report the faulty
732 instruction in the error message. */
733 void
734 md_operand (expressionS * exp)
735 {
736 if (in_aarch64_get_expression)
737 exp->X_op = O_illegal;
738 }
739
740 /* Immediate values. */
741
742 /* Errors may be set multiple times during parsing or bit encoding
743 (particularly in the Neon bits), but usually the earliest error which is set
744 will be the most meaningful. Avoid overwriting it with later (cascading)
745 errors by calling this function. */
746
747 static void
748 first_error (const char *error)
749 {
750 if (! error_p ())
751 set_syntax_error (error);
752 }
753
754 /* Similar to first_error, but this function accepts a formatted error
755 message. */
756 static void
757 first_error_fmt (const char *format, ...)
758 {
759 va_list args;
760 enum
761 { size = 100 };
762 /* N.B. this single buffer will not cause error messages for different
763 instructions to pollute each other; this is because at the end of
764 processing of each assembly line, the error message, if any, will be
765 collected by as_bad. */
766 static char buffer[size];
767
768 if (! error_p ())
769 {
770 int ret ATTRIBUTE_UNUSED;
771 va_start (args, format);
772 ret = vsnprintf (buffer, size, format, args);
773 know (ret <= size - 1 && ret >= 0);
774 va_end (args);
775 set_syntax_error (buffer);
776 }
777 }
778
779 /* Internal helper routine converting a vector_type_el structure *VECTYPE
780 to a corresponding operand qualifier. */
781
782 static inline aarch64_opnd_qualifier_t
783 vectype_to_qualifier (const struct vector_type_el *vectype)
784 {
785 /* Element size in bytes indexed by vector_el_type. */
786 const unsigned char ele_size[5]
787 = {1, 2, 4, 8, 16};
788 const unsigned int ele_base [5] =
789 {
790 AARCH64_OPND_QLF_V_4B,
791 AARCH64_OPND_QLF_V_2H,
792 AARCH64_OPND_QLF_V_2S,
793 AARCH64_OPND_QLF_V_1D,
794 AARCH64_OPND_QLF_V_1Q
795 };
796
797 if (!vectype->defined || vectype->type == NT_invtype)
798 goto vectype_conversion_fail;
799
800 if (vectype->type == NT_zero)
801 return AARCH64_OPND_QLF_P_Z;
802 if (vectype->type == NT_merge)
803 return AARCH64_OPND_QLF_P_M;
804
805 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
806
807 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
808 {
809 /* Special case S_4B. */
810 if (vectype->type == NT_b && vectype->width == 4)
811 return AARCH64_OPND_QLF_S_4B;
812
813 /* Special case S_2H. */
814 if (vectype->type == NT_h && vectype->width == 2)
815 return AARCH64_OPND_QLF_S_2H;
816
817 /* Vector element register. */
818 return AARCH64_OPND_QLF_S_B + vectype->type;
819 }
820 else
821 {
822 /* Vector register. */
823 int reg_size = ele_size[vectype->type] * vectype->width;
824 unsigned offset;
825 unsigned shift;
826 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
827 goto vectype_conversion_fail;
828
829 /* The conversion is done by calculating the offset from the base operand
830 qualifier for the vector type. The operand qualifiers are regular
831 enough that the offset can be established by shifting the vector width by
832 a vector-type dependent amount. */
833 shift = 0;
834 if (vectype->type == NT_b)
835 shift = 3;
836 else if (vectype->type == NT_h || vectype->type == NT_s)
837 shift = 2;
838 else if (vectype->type >= NT_d)
839 shift = 1;
840 else
841 gas_assert (0);
842
843 offset = ele_base [vectype->type] + (vectype->width >> shift);
844 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
845 && offset <= AARCH64_OPND_QLF_V_1Q);
846 return offset;
847 }
848
849 vectype_conversion_fail:
850 first_error (_("bad vector arrangement type"));
851 return AARCH64_OPND_QLF_NIL;
852 }
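/* Worked example of the conversion above: for ".4s" (type NT_s, width 4)
   the register size is 4 * 4 = 16 bytes, the shift for NT_s is 2, and

     offset = ele_base[NT_s] + (4 >> 2)
            = AARCH64_OPND_QLF_V_2S + 1
            = AARCH64_OPND_QLF_V_4S

   Similarly ".8b" yields AARCH64_OPND_QLF_V_4B + (8 >> 3), i.e.
   AARCH64_OPND_QLF_V_8B.  This relies on the vector-arrangement qualifier
   enumerators being consecutive, which the assertion above checks.  */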
853
854 /* Register parsing. */
855
856 /* Generic register parser which is called by other specialized
857 register parsers.
858 CCP points to what should be the beginning of a register name.
859 If it is indeed a valid register name, advance CCP over it and
860 return the reg_entry structure; otherwise return NULL.
861 It does not issue diagnostics. */
862
863 static reg_entry *
864 parse_reg (char **ccp)
865 {
866 char *start = *ccp;
867 char *p;
868 reg_entry *reg;
869
870 #ifdef REGISTER_PREFIX
871 if (*start != REGISTER_PREFIX)
872 return NULL;
873 start++;
874 #endif
875
876 p = start;
877 if (!ISALPHA (*p) || !is_name_beginner (*p))
878 return NULL;
879
880 do
881 p++;
882 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
883
884 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
885
886 if (!reg)
887 return NULL;
888
889 *ccp = p;
890 return reg;
891 }
892
893 /* Return the operand qualifier associated with all uses of REG, or
894 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
895 that qualifiers don't apply to REG or that qualifiers are added
896 using suffixes. */
897
898 static aarch64_opnd_qualifier_t
899 inherent_reg_qualifier (const reg_entry *reg)
900 {
901 switch (reg->type)
902 {
903 case REG_TYPE_R_32:
904 case REG_TYPE_SP_32:
905 case REG_TYPE_Z_32:
906 return AARCH64_OPND_QLF_W;
907
908 case REG_TYPE_R_64:
909 case REG_TYPE_SP_64:
910 case REG_TYPE_Z_64:
911 return AARCH64_OPND_QLF_X;
912
913 case REG_TYPE_FP_B:
914 case REG_TYPE_FP_H:
915 case REG_TYPE_FP_S:
916 case REG_TYPE_FP_D:
917 case REG_TYPE_FP_Q:
918 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
919
920 default:
921 return AARCH64_OPND_QLF_NIL;
922 }
923 }
924
925 /* Return TRUE if REG->TYPE is an acceptable register type for TYPE;
926 otherwise return FALSE. */
927 static bool
928 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
929 {
930 return (reg_type_masks[type] & (1 << reg->type)) != 0;
931 }
932
933 /* Try to parse a base or offset register. Allow SVE base and offset
934 registers if REG_TYPE includes SVE registers. Return the register
935 entry on success, setting *QUALIFIER to the register qualifier.
936 Return null otherwise.
937
938 Note that this function does not issue any diagnostics. */
939
940 static const reg_entry *
941 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
942 aarch64_opnd_qualifier_t *qualifier)
943 {
944 char *str = *ccp;
945 const reg_entry *reg = parse_reg (&str);
946
947 if (reg == NULL)
948 return NULL;
949
950 switch (reg->type)
951 {
952 case REG_TYPE_ZN:
953 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
954 || str[0] != '.')
955 return NULL;
956 switch (TOLOWER (str[1]))
957 {
958 case 's':
959 *qualifier = AARCH64_OPND_QLF_S_S;
960 break;
961 case 'd':
962 *qualifier = AARCH64_OPND_QLF_S_D;
963 break;
964 default:
965 return NULL;
966 }
967 str += 2;
968 break;
969
970 default:
971 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
972 return NULL;
973 *qualifier = inherent_reg_qualifier (reg);
974 break;
975 }
976
977 *ccp = str;
978
979 return reg;
980 }
981
982 /* Try to parse a base or offset register. Return the register entry
983 on success, setting *QUALIFIER to the register qualifier. Return null
984 otherwise.
985
986 Note that this function does not issue any diagnostics. */
987
988 static const reg_entry *
989 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
990 {
991 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
992 }
993
994 /* Parse the qualifier of a vector register or vector element of type
995 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
996 succeeds; otherwise return FALSE.
997
998 Accept only one occurrence of:
999 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
1000 b h s d q */
1001 static bool
1002 parse_vector_type_for_operand (aarch64_reg_type reg_type,
1003 struct vector_type_el *parsed_type, char **str)
1004 {
1005 char *ptr = *str;
1006 unsigned width;
1007 unsigned element_size;
1008 enum vector_el_type type;
1009
1010 /* skip '.' */
1011 gas_assert (*ptr == '.');
1012 ptr++;
1013
1014 if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
1015 {
1016 width = 0;
1017 goto elt_size;
1018 }
1019 width = strtoul (ptr, &ptr, 10);
1020 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
1021 {
1022 first_error_fmt (_("bad size %d in vector width specifier"), width);
1023 return false;
1024 }
1025
1026 elt_size:
1027 switch (TOLOWER (*ptr))
1028 {
1029 case 'b':
1030 type = NT_b;
1031 element_size = 8;
1032 break;
1033 case 'h':
1034 type = NT_h;
1035 element_size = 16;
1036 break;
1037 case 's':
1038 type = NT_s;
1039 element_size = 32;
1040 break;
1041 case 'd':
1042 type = NT_d;
1043 element_size = 64;
1044 break;
1045 case 'q':
1046 if (reg_type != REG_TYPE_VN || width == 1)
1047 {
1048 type = NT_q;
1049 element_size = 128;
1050 break;
1051 }
1052 /* fall through. */
1053 default:
1054 if (*ptr != '\0')
1055 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
1056 else
1057 first_error (_("missing element size"));
1058 return false;
1059 }
1060 if (width != 0 && width * element_size != 64
1061 && width * element_size != 128
1062 && !(width == 2 && element_size == 16)
1063 && !(width == 4 && element_size == 8))
1064 {
1065 first_error_fmt (_
1066 ("invalid element size %d and vector size combination %c"),
1067 width, *ptr);
1068 return false;
1069 }
1070 ptr++;
1071
1072 parsed_type->type = type;
1073 parsed_type->width = width;
1074 parsed_type->element_size = element_size;
1075
1076 *str = ptr;
1077
1078 return true;
1079 }
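/* For example, the suffix ".4s" on a V register returns with
   parsed_type->type == NT_s, parsed_type->width == 4 and
   parsed_type->element_size == 32 (4 * 32 bits == 128, a full Q-form
   vector).  A bare ".s" on an SVE Z or P register leaves the width as 0
   and records only the element type.  */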
1080
1081 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1082 *PARSED_TYPE and point *STR at the end of the suffix. */
1083
1084 static bool
1085 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1086 {
1087 char *ptr = *str;
1088
1089 /* Skip '/'. */
1090 gas_assert (*ptr == '/');
1091 ptr++;
1092 switch (TOLOWER (*ptr))
1093 {
1094 case 'z':
1095 parsed_type->type = NT_zero;
1096 break;
1097 case 'm':
1098 parsed_type->type = NT_merge;
1099 break;
1100 default:
1101 if (*ptr != '\0' && *ptr != ',')
1102 first_error_fmt (_("unexpected character `%c' in predication type"),
1103 *ptr);
1104 else
1105 first_error (_("missing predication type"));
1106 return false;
1107 }
1108 parsed_type->width = 0;
1109 *str = ptr + 1;
1110 return true;
1111 }
1112
1113 /* Return true if CH is a valid suffix character for registers of
1114 type TYPE. */
1115
1116 static bool
1117 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1118 {
1119 switch (type)
1120 {
1121 case REG_TYPE_VN:
1122 case REG_TYPE_ZN:
1123 case REG_TYPE_ZA:
1124 case REG_TYPE_ZAT:
1125 case REG_TYPE_ZATH:
1126 case REG_TYPE_ZATV:
1127 return ch == '.';
1128
1129 case REG_TYPE_PN:
1130 return ch == '.' || ch == '/';
1131
1132 default:
1133 return false;
1134 }
1135 }
1136
1137 /* Parse an index expression at *STR, storing it in *IMM on success. */
1138
1139 static bool
1140 parse_index_expression (char **str, int64_t *imm)
1141 {
1142 expressionS exp;
1143
1144 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1145 if (exp.X_op != O_constant)
1146 {
1147 first_error (_("constant expression required"));
1148 return false;
1149 }
1150 *imm = exp.X_add_number;
1151 return true;
1152 }
1153
1154 /* Parse a register of the type TYPE.
1155
1156 Return null if the string pointed to by *CCP is not a valid register
1157 name or the parsed register is not of TYPE.
1158
1159 Otherwise return the register, and optionally return the register
1160 shape and element index information in *TYPEINFO.
1161
1162 FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.
1163
1164 FLAGS includes PTR_FULL_REG if the function should ignore any potential
1165 register index.
1166
1167 FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
1168 an operand that we can be confident that it is a good match. */
1169
1170 #define PTR_IN_REGLIST (1U << 0)
1171 #define PTR_FULL_REG (1U << 1)
1172 #define PTR_GOOD_MATCH (1U << 2)
1173
1174 static const reg_entry *
1175 parse_typed_reg (char **ccp, aarch64_reg_type type,
1176 struct vector_type_el *typeinfo, unsigned int flags)
1177 {
1178 char *str = *ccp;
1179 bool isalpha = ISALPHA (*str);
1180 const reg_entry *reg = parse_reg (&str);
1181 struct vector_type_el atype;
1182 struct vector_type_el parsetype;
1183 bool is_typed_vecreg = false;
1184 unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;
1185
1186 atype.defined = 0;
1187 atype.type = NT_invtype;
1188 atype.width = -1;
1189 atype.element_size = 0;
1190 atype.index = 0;
1191
1192 if (reg == NULL)
1193 {
1194 if (typeinfo)
1195 *typeinfo = atype;
1196 if (!isalpha && (flags & PTR_IN_REGLIST))
1197 set_fatal_syntax_error (_("syntax error in register list"));
1198 else if (flags & PTR_GOOD_MATCH)
1199 set_fatal_syntax_error (NULL);
1200 else
1201 set_expected_reg_error (type, reg, err_flags);
1202 return NULL;
1203 }
1204
1205 if (! aarch64_check_reg_type (reg, type))
1206 {
1207 DEBUG_TRACE ("reg type check failed");
1208 if (flags & PTR_GOOD_MATCH)
1209 set_fatal_syntax_error (NULL);
1210 else
1211 set_expected_reg_error (type, reg, err_flags);
1212 return NULL;
1213 }
1214 type = reg->type;
1215
1216 if (aarch64_valid_suffix_char_p (reg->type, *str))
1217 {
1218 if (*str == '.')
1219 {
1220 if (!parse_vector_type_for_operand (type, &parsetype, &str))
1221 return NULL;
1222 if ((reg->type == REG_TYPE_ZAT
1223 || reg->type == REG_TYPE_ZATH
1224 || reg->type == REG_TYPE_ZATV)
1225 && reg->number * 8 >= parsetype.element_size)
1226 {
1227 set_syntax_error (_("ZA tile number out of range"));
1228 return NULL;
1229 }
1230 }
1231 else
1232 {
1233 if (!parse_predication_for_operand (&parsetype, &str))
1234 return NULL;
1235 }
1236
1237 /* Register is of the form Vn.[bhsdq]. */
1238 is_typed_vecreg = true;
1239
1240 if (type != REG_TYPE_VN)
1241 {
1242 /* The width is always variable; we don't allow an integer width
1243 to be specified. */
1244 gas_assert (parsetype.width == 0);
1245 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1246 }
1247 else if (parsetype.width == 0)
1248 /* Expect index. In the new scheme we cannot have
1249 Vn.[bhsdq] represent a scalar. Therefore any
1250 Vn.[bhsdq] should have an index following it.
1251 Except in reglists of course. */
1252 atype.defined |= NTA_HASINDEX;
1253 else
1254 atype.defined |= NTA_HASTYPE;
1255
1256 atype.type = parsetype.type;
1257 atype.width = parsetype.width;
1258 }
1259
1260 if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
1261 {
1262 /* Reject Sn[index] syntax. */
1263 if (!is_typed_vecreg)
1264 {
1265 first_error (_("this type of register can't be indexed"));
1266 return NULL;
1267 }
1268
1269 if (flags & PTR_IN_REGLIST)
1270 {
1271 first_error (_("index not allowed inside register list"));
1272 return NULL;
1273 }
1274
1275 atype.defined |= NTA_HASINDEX;
1276
1277 if (!parse_index_expression (&str, &atype.index))
1278 return NULL;
1279
1280 if (! skip_past_char (&str, ']'))
1281 return NULL;
1282 }
1283 else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
1284 {
1285 /* Indexed vector register expected. */
1286 first_error (_("indexed vector register expected"));
1287 return NULL;
1288 }
1289
1290 /* A vector reg Vn should be typed or indexed. */
1291 if (type == REG_TYPE_VN && atype.defined == 0)
1292 {
1293 first_error (_("invalid use of vector register"));
1294 }
1295
1296 if (typeinfo)
1297 *typeinfo = atype;
1298
1299 *ccp = str;
1300
1301 return reg;
1302 }
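/* For example, parsing "v2.4s[1]" with TYPE == REG_TYPE_VN and FLAGS == 0
   yields the reg_entry for v2 and a *TYPEINFO of roughly

     { .type = NT_s, .defined = NTA_HASTYPE | NTA_HASINDEX,
       .width = 4, .index = 1 }

   whereas a plain "v2" with no suffix or index records the error
   "invalid use of vector register", since a bare Vn must be either typed
   or indexed.  */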
1303
1304 /* Parse register.
1305
1306 Return the register on success; return null otherwise.
1307
1308 If this is a NEON vector register with additional type information, fill
1309 in the struct pointed to by VECTYPE (if non-NULL).
1310
1311 This parser does not handle register lists. */
1312
1313 static const reg_entry *
1314 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1315 struct vector_type_el *vectype)
1316 {
1317 return parse_typed_reg (ccp, type, vectype, 0);
1318 }
1319
1320 static inline bool
1321 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1322 {
1323 return (e1.type == e2.type
1324 && e1.defined == e2.defined
1325 && e1.width == e2.width
1326 && e1.element_size == e2.element_size
1327 && e1.index == e2.index);
1328 }
1329
1330 /* This function parses a list of vector registers of type TYPE.
1331 On success, it returns the parsed register list information in the
1332 following encoded format:
1333
1334 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1335 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1336
1337 The information of the register shape and/or index is returned in
1338 *VECTYPE.
1339
1340 It returns PARSE_FAIL if the register list is invalid.
1341
1342 The list contains one to four registers.
1343 Each register can be one of:
1344 <Vt>.<T>[<index>]
1345 <Vt>.<T>
1346 All <T> should be identical.
1347 All <index> should be identical.
1348 There are restrictions on <Vt> numbers which are checked later
1349 (by reg_list_valid_p). */
1350
1351 static int
1352 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1353 struct vector_type_el *vectype)
1354 {
1355 char *str = *ccp;
1356 int nb_regs;
1357 struct vector_type_el typeinfo, typeinfo_first;
1358 int val, val_range;
1359 int in_range;
1360 int ret_val;
1361 int i;
1362 bool error = false;
1363 bool expect_index = false;
1364 unsigned int ptr_flags = PTR_IN_REGLIST;
1365
1366 if (*str != '{')
1367 {
1368 set_expected_reglist_error (type, parse_reg (&str));
1369 return PARSE_FAIL;
1370 }
1371 str++;
1372
1373 nb_regs = 0;
1374 typeinfo_first.defined = 0;
1375 typeinfo_first.type = NT_invtype;
1376 typeinfo_first.width = -1;
1377 typeinfo_first.element_size = 0;
1378 typeinfo_first.index = 0;
1379 ret_val = 0;
1380 val = -1;
1381 val_range = -1;
1382 in_range = 0;
1383 do
1384 {
1385 if (in_range)
1386 {
1387 str++; /* skip over '-' */
1388 val_range = val;
1389 }
1390 const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
1391 ptr_flags);
1392 if (!reg)
1393 {
1394 set_first_syntax_error (_("invalid vector register in list"));
1395 error = true;
1396 continue;
1397 }
1398 val = reg->number;
1399 /* reject [bhsd]n */
1400 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1401 {
1402 set_first_syntax_error (_("invalid scalar register in list"));
1403 error = true;
1404 continue;
1405 }
1406
1407 if (typeinfo.defined & NTA_HASINDEX)
1408 expect_index = true;
1409
1410 if (in_range)
1411 {
1412 if (val < val_range)
1413 {
1414 set_first_syntax_error
1415 (_("invalid range in vector register list"));
1416 error = true;
1417 }
1418 val_range++;
1419 }
1420 else
1421 {
1422 val_range = val;
1423 if (nb_regs == 0)
1424 typeinfo_first = typeinfo;
1425 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1426 {
1427 set_first_syntax_error
1428 (_("type mismatch in vector register list"));
1429 error = true;
1430 }
1431 }
1432 if (! error)
1433 for (i = val_range; i <= val; i++)
1434 {
1435 ret_val |= i << (5 * nb_regs);
1436 nb_regs++;
1437 }
1438 in_range = 0;
1439 ptr_flags |= PTR_GOOD_MATCH;
1440 }
1441 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1442
1443 skip_whitespace (str);
1444 if (*str != '}')
1445 {
1446 set_first_syntax_error (_("end of vector register list not found"));
1447 error = true;
1448 }
1449 str++;
1450
1451 skip_whitespace (str);
1452
1453 if (expect_index)
1454 {
1455 if (skip_past_char (&str, '['))
1456 {
1457 if (!parse_index_expression (&str, &typeinfo_first.index))
1458 error = true;
1459 if (! skip_past_char (&str, ']'))
1460 error = true;
1461 }
1462 else
1463 {
1464 set_first_syntax_error (_("expected index"));
1465 error = true;
1466 }
1467 }
1468
1469 if (nb_regs > 4)
1470 {
1471 set_first_syntax_error (_("too many registers in vector register list"));
1472 error = true;
1473 }
1474 else if (nb_regs == 0)
1475 {
1476 set_first_syntax_error (_("empty vector register list"));
1477 error = true;
1478 }
1479
1480 *ccp = str;
1481 if (! error)
1482 *vectype = typeinfo_first;
1483
1484 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1485 }
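/* Encoding example for the return value above: the list "{v2.4s - v5.4s}"
   contains registers 2, 3, 4 and 5, so

     ret_val = 2 | (3 << 5) | (4 << 10) | (5 << 15)

   and the function returns (ret_val << 2) | (4 - 1): the four register
   numbers in 5-bit fields starting at bit 2, with the register count
   minus one in bits 0-1.  */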
1486
1487 /* Directives: register aliases. */
1488
1489 static reg_entry *
1490 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1491 {
1492 reg_entry *new;
1493 const char *name;
1494
1495 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1496 {
1497 if (new->builtin)
1498 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1499 str);
1500
1501 /* Only warn about a redefinition if it's not defined as the
1502 same register. */
1503 else if (new->number != number || new->type != type)
1504 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1505
1506 return NULL;
1507 }
1508
1509 name = xstrdup (str);
1510 new = XNEW (reg_entry);
1511
1512 new->name = name;
1513 new->number = number;
1514 new->type = type;
1515 new->builtin = false;
1516
1517 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1518
1519 return new;
1520 }
1521
1522 /* Look for the .req directive. This is of the form:
1523
1524 new_register_name .req existing_register_name
1525
1526 If we find one, or if it looks sufficiently like one that we want to
1527 handle any error here, return TRUE. Otherwise return FALSE. */
1528
1529 static bool
1530 create_register_alias (char *newname, char *p)
1531 {
1532 const reg_entry *old;
1533 char *oldname, *nbuf;
1534 size_t nlen;
1535
1536 /* The input scrubber ensures that whitespace after the mnemonic is
1537 collapsed to single spaces. */
1538 oldname = p;
1539 if (!startswith (oldname, " .req "))
1540 return false;
1541
1542 oldname += 6;
1543 if (*oldname == '\0')
1544 return false;
1545
1546 old = str_hash_find (aarch64_reg_hsh, oldname);
1547 if (!old)
1548 {
1549 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1550 return true;
1551 }
1552
1553 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1554 the desired alias name, and p points to its end. If not, then
1555 the desired alias name is in the global original_case_string. */
1556 #ifdef TC_CASE_SENSITIVE
1557 nlen = p - newname;
1558 #else
1559 newname = original_case_string;
1560 nlen = strlen (newname);
1561 #endif
1562
1563 nbuf = xmemdup0 (newname, nlen);
1564
1565 /* Create aliases under the new name as stated; an all-lowercase
1566 version of the new name; and an all-uppercase version of the new
1567 name. */
1568 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1569 {
1570 for (p = nbuf; *p; p++)
1571 *p = TOUPPER (*p);
1572
1573 if (strncmp (nbuf, newname, nlen))
1574 {
1575 /* If this attempt to create an additional alias fails, do not bother
1576 trying to create the all-lower case alias. We will fail and issue
1577 a second, duplicate error message. This situation arises when the
1578 programmer does something like:
1579 foo .req r0
1580 Foo .req r1
1581 The second .req creates the "Foo" alias but then fails to create
1582 the artificial FOO alias because it has already been created by the
1583 first .req. */
1584 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1585 {
1586 free (nbuf);
1587 return true;
1588 }
1589 }
1590
1591 for (p = nbuf; *p; p++)
1592 *p = TOLOWER (*p);
1593
1594 if (strncmp (nbuf, newname, nlen))
1595 insert_reg_alias (nbuf, old->number, old->type);
1596 }
1597
1598 free (nbuf);
1599 return true;
1600 }
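/* For example, after

     foo .req x0

   both "foo" and "FOO" name x0, because the code above installs the alias
   under the name as written and also under all-uppercase and all-lowercase
   copies of it.  A mixed-case use such as "Foo" is only recognized if that
   was the spelling given in the .req directive.  */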
1601
1602 /* Should never be called, as .req goes between the alias and the
1603 register name, not at the beginning of the line. */
1604 static void
1605 s_req (int a ATTRIBUTE_UNUSED)
1606 {
1607 as_bad (_("invalid syntax for .req directive"));
1608 }
1609
1610 /* The .unreq directive deletes an alias which was previously defined
1611 by .req. For example:
1612
1613 my_alias .req r11
1614 .unreq my_alias */
1615
1616 static void
1617 s_unreq (int a ATTRIBUTE_UNUSED)
1618 {
1619 char *name;
1620 char saved_char;
1621
1622 name = input_line_pointer;
1623 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1624 saved_char = *input_line_pointer;
1625 *input_line_pointer = 0;
1626
1627 if (!*name)
1628 as_bad (_("invalid syntax for .unreq directive"));
1629 else
1630 {
1631 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1632
1633 if (!reg)
1634 as_bad (_("unknown register alias '%s'"), name);
1635 else if (reg->builtin)
1636 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1637 name);
1638 else
1639 {
1640 char *p;
1641 char *nbuf;
1642
1643 str_hash_delete (aarch64_reg_hsh, name);
1644 free ((char *) reg->name);
1645 free (reg);
1646
1647 /* Also locate the all upper case and all lower case versions.
1648 Do not complain if we cannot find one or the other as it
1649 was probably deleted above. */
1650
1651 nbuf = strdup (name);
1652 for (p = nbuf; *p; p++)
1653 *p = TOUPPER (*p);
1654 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1655 if (reg)
1656 {
1657 str_hash_delete (aarch64_reg_hsh, nbuf);
1658 free ((char *) reg->name);
1659 free (reg);
1660 }
1661
1662 for (p = nbuf; *p; p++)
1663 *p = TOLOWER (*p);
1664 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1665 if (reg)
1666 {
1667 str_hash_delete (aarch64_reg_hsh, nbuf);
1668 free ((char *) reg->name);
1669 free (reg);
1670 }
1671
1672 free (nbuf);
1673 }
1674 }
1675
1676 *input_line_pointer = saved_char;
1677 demand_empty_rest_of_line ();
1678 }
1679
1680 /* Directives: Instruction set selection. */
1681
1682 #if defined OBJ_ELF || defined OBJ_COFF
1683 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1684 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1685 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1686 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1687
1688 /* Create a new mapping symbol for the transition to STATE. */
1689
1690 static void
1691 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1692 {
1693 symbolS *symbolP;
1694 const char *symname;
1695 int type;
1696
1697 switch (state)
1698 {
1699 case MAP_DATA:
1700 symname = "$d";
1701 type = BSF_NO_FLAGS;
1702 break;
1703 case MAP_INSN:
1704 symname = "$x";
1705 type = BSF_NO_FLAGS;
1706 break;
1707 default:
1708 abort ();
1709 }
1710
1711 symbolP = symbol_new (symname, now_seg, frag, value);
1712 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1713
1714 /* Save the mapping symbols for future reference. Also check that
1715 we do not place two mapping symbols at the same offset within a
1716 frag. We'll handle overlap between frags in
1717 check_mapping_symbols.
1718
1719 If .fill or other data filling directive generates zero sized data,
1720 the mapping symbol for the following code will have the same value
1721 as the one generated for the data filling directive. In this case,
1722 we replace the old symbol with the new one at the same address. */
1723 if (value == 0)
1724 {
1725 if (frag->tc_frag_data.first_map != NULL)
1726 {
1727 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1728 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1729 &symbol_lastP);
1730 }
1731 frag->tc_frag_data.first_map = symbolP;
1732 }
1733 if (frag->tc_frag_data.last_map != NULL)
1734 {
1735 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1736 S_GET_VALUE (symbolP));
1737 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1738 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1739 &symbol_lastP);
1740 }
1741 frag->tc_frag_data.last_map = symbolP;
1742 }
1743
1744 /* We must sometimes convert a region marked as code to data during
1745 code alignment, if an odd number of bytes have to be padded. The
1746 code mapping symbol is pushed to an aligned address. */
1747
1748 static void
1749 insert_data_mapping_symbol (enum mstate state,
1750 valueT value, fragS * frag, offsetT bytes)
1751 {
1752 /* If there was already a mapping symbol, remove it. */
1753 if (frag->tc_frag_data.last_map != NULL
1754 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1755 frag->fr_address + value)
1756 {
1757 symbolS *symp = frag->tc_frag_data.last_map;
1758
1759 if (value == 0)
1760 {
1761 know (frag->tc_frag_data.first_map == symp);
1762 frag->tc_frag_data.first_map = NULL;
1763 }
1764 frag->tc_frag_data.last_map = NULL;
1765 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1766 }
1767
1768 make_mapping_symbol (MAP_DATA, value, frag);
1769 make_mapping_symbol (state, value + bytes, frag);
1770 }
1771
1772 static void mapping_state_2 (enum mstate state, int max_chars);
1773
1774 /* Set the mapping state to STATE. Only call this when about to
1775 emit some STATE bytes to the file. */
1776
1777 void
1778 mapping_state (enum mstate state)
1779 {
1780 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1781
1782 if (state == MAP_INSN)
1783 /* AArch64 instructions require 4-byte alignment. When emitting
1784 instructions into any section, record the appropriate section
1785 alignment. */
1786 record_alignment (now_seg, 2);
1787
1788 if (mapstate == state)
1789 /* The mapping symbol has already been emitted.
1790 There is nothing else to do. */
1791 return;
1792
1793 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1794 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1795 /* Emit MAP_DATA within an executable section in order. Otherwise, it will
1796 be evaluated later in the next else branch. */
1797 return;
1798 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1799 {
1800 /* Only add the symbol if the offset is > 0:
1801 if we're at the first frag, check its size > 0;
1802 if we're not at the first frag, then for sure
1803 the offset is > 0. */
1804 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1805 const int add_symbol = (frag_now != frag_first)
1806 || (frag_now_fix () > 0);
1807
1808 if (add_symbol)
1809 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1810 }
1811 #undef TRANSITION
1812
1813 mapping_state_2 (state, 0);
1814 }
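/* For example, a sequence such as

     .text
     .word 1234
     add x0, x1, x2

   will normally result in a "$d" mapping symbol at the start of the data
   and a "$x" mapping symbol at the following instruction, letting
   disassemblers and other consumers distinguish code from data within the
   section.  (The data directives reach mapping_state via target hooks
   defined outside this file.)  */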
1815
1816 /* Same as mapping_state, but MAX_CHARS bytes have already been
1817 allocated. Put the mapping symbol that far back. */
1818
1819 static void
1820 mapping_state_2 (enum mstate state, int max_chars)
1821 {
1822 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1823
1824 if (!SEG_NORMAL (now_seg))
1825 return;
1826
1827 if (mapstate == state)
1828 /* The mapping symbol has already been emitted.
1829 There is nothing else to do. */
1830 return;
1831
1832 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1833 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1834 }
1835 #else
1836 #define mapping_state(x) /* nothing */
1837 #define mapping_state_2(x, y) /* nothing */
1838 #endif
1839
1840 /* Directives: sectioning and alignment. */
1841
1842 static void
1843 s_bss (int ignore ATTRIBUTE_UNUSED)
1844 {
1845 /* We don't support putting frags in the BSS segment, we fake it by
1846 marking in_bss, then looking at s_skip for clues. */
1847 subseg_set (bss_section, 0);
1848 demand_empty_rest_of_line ();
1849 mapping_state (MAP_DATA);
1850 }
1851
1852 static void
1853 s_even (int ignore ATTRIBUTE_UNUSED)
1854 {
1855 /* Never make a frag if we expect an extra pass. */
1856 if (!need_pass_2)
1857 frag_align (1, 0, 0);
1858
1859 record_alignment (now_seg, 1);
1860
1861 demand_empty_rest_of_line ();
1862 }
1863
1864 /* Directives: Literal pools. */
1865
1866 static literal_pool *
1867 find_literal_pool (int size)
1868 {
1869 literal_pool *pool;
1870
1871 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1872 {
1873 if (pool->section == now_seg
1874 && pool->sub_section == now_subseg && pool->size == size)
1875 break;
1876 }
1877
1878 return pool;
1879 }
1880
1881 static literal_pool *
1882 find_or_make_literal_pool (int size)
1883 {
1884 /* Next literal pool ID number. */
1885 static unsigned int latest_pool_num = 1;
1886 literal_pool *pool;
1887
1888 pool = find_literal_pool (size);
1889
1890 if (pool == NULL)
1891 {
1892 /* Create a new pool. */
1893 pool = XNEW (literal_pool);
1894 if (!pool)
1895 return NULL;
1896
1897 /* Currently we always put the literal pool in the current text
1898 section. If we were generating "small" model code where we
1899 knew that all code and initialised data was within 1MB then
1900 we could output literals to mergeable, read-only data
1901 sections. */
1902
1903 pool->next_free_entry = 0;
1904 pool->section = now_seg;
1905 pool->sub_section = now_subseg;
1906 pool->size = size;
1907 pool->next = list_of_pools;
1908 pool->symbol = NULL;
1909
1910 /* Add it to the list. */
1911 list_of_pools = pool;
1912 }
1913
1914 /* New pools, and emptied pools, will have a NULL symbol. */
1915 if (pool->symbol == NULL)
1916 {
1917 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1918 &zero_address_frag, 0);
1919 pool->id = latest_pool_num++;
1920 }
1921
1922 /* Done. */
1923 return pool;
1924 }
1925
1926 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1927 Return TRUE on success, otherwise return FALSE. */
1928 static bool
1929 add_to_lit_pool (expressionS *exp, int size)
1930 {
1931 literal_pool *pool;
1932 unsigned int entry;
1933
1934 pool = find_or_make_literal_pool (size);
1935
1936 /* Check if this literal value is already in the pool. */
1937 for (entry = 0; entry < pool->next_free_entry; entry++)
1938 {
1939 expressionS * litexp = & pool->literals[entry].exp;
1940
1941 if ((litexp->X_op == exp->X_op)
1942 && (exp->X_op == O_constant)
1943 && (litexp->X_add_number == exp->X_add_number)
1944 && (litexp->X_unsigned == exp->X_unsigned))
1945 break;
1946
1947 if ((litexp->X_op == exp->X_op)
1948 && (exp->X_op == O_symbol)
1949 && (litexp->X_add_number == exp->X_add_number)
1950 && (litexp->X_add_symbol == exp->X_add_symbol)
1951 && (litexp->X_op_symbol == exp->X_op_symbol))
1952 break;
1953 }
1954
1955 /* Do we need to create a new entry? */
1956 if (entry == pool->next_free_entry)
1957 {
1958 if (entry >= MAX_LITERAL_POOL_SIZE)
1959 {
1960 set_syntax_error (_("literal pool overflow"));
1961 return false;
1962 }
1963
1964 pool->literals[entry].exp = *exp;
1965 pool->next_free_entry += 1;
1966 if (exp->X_op == O_big)
1967 {
1968 /* PR 16688: Bignums are held in a single global array. We must
1969 copy and preserve that value now, before it is overwritten. */
1970 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1971 exp->X_add_number);
1972 memcpy (pool->literals[entry].bignum, generic_bignum,
1973 CHARS_PER_LITTLENUM * exp->X_add_number);
1974 }
1975 else
1976 pool->literals[entry].bignum = NULL;
1977 }
1978
1979 exp->X_op = O_symbol;
1980 exp->X_add_number = ((int) entry) * size;
1981 exp->X_add_symbol = pool->symbol;
1982
1983 return true;
1984 }
1985
1986 /* Can't use symbol_new here, so have to create a symbol and then at
1987 a later date assign it a value. That's what these functions do. */
1988
1989 static void
1990 symbol_locate (symbolS * symbolP,
1991 const char *name,/* It is copied, the caller can modify. */
1992 segT segment, /* Segment identifier (SEG_<something>). */
1993 valueT valu, /* Symbol value. */
1994 fragS * frag) /* Associated fragment. */
1995 {
1996 size_t name_length;
1997 char *preserved_copy_of_name;
1998
1999 name_length = strlen (name) + 1; /* +1 for \0. */
2000 obstack_grow (&notes, name, name_length);
2001 preserved_copy_of_name = obstack_finish (&notes);
2002
2003 #ifdef tc_canonicalize_symbol_name
2004 preserved_copy_of_name =
2005 tc_canonicalize_symbol_name (preserved_copy_of_name);
2006 #endif
2007
2008 S_SET_NAME (symbolP, preserved_copy_of_name);
2009
2010 S_SET_SEGMENT (symbolP, segment);
2011 S_SET_VALUE (symbolP, valu);
2012 symbol_clear_list_pointers (symbolP);
2013
2014 symbol_set_frag (symbolP, frag);
2015
2016 /* Link to end of symbol chain. */
2017 {
2018 extern int symbol_table_frozen;
2019
2020 if (symbol_table_frozen)
2021 abort ();
2022 }
2023
2024 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
2025
2026 obj_symbol_new_hook (symbolP);
2027
2028 #ifdef tc_symbol_new_hook
2029 tc_symbol_new_hook (symbolP);
2030 #endif
2031
2032 #ifdef DEBUG_SYMS
2033 verify_symbol_chain (symbol_rootP, symbol_lastP);
2034 #endif /* DEBUG_SYMS */
2035 }
2036
2037
2038 static void
2039 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2040 {
2041 unsigned int entry;
2042 literal_pool *pool;
2043 char sym_name[20];
2044 int align;
2045
2046 for (align = 2; align <= 4; align++)
2047 {
2048 int size = 1 << align;
2049
2050 pool = find_literal_pool (size);
2051 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
2052 continue;
2053
2054 /* Align the pool for word-sized accesses.
2055 Only make a frag if we have to. */
2056 if (!need_pass_2)
2057 frag_align (align, 0, 0);
2058
2059 mapping_state (MAP_DATA);
2060
2061 record_alignment (now_seg, align);
2062
2063 sprintf (sym_name, "$$lit_\002%x", pool->id);
2064
2065 symbol_locate (pool->symbol, sym_name, now_seg,
2066 (valueT) frag_now_fix (), frag_now);
2067 symbol_table_insert (pool->symbol);
2068
2069 for (entry = 0; entry < pool->next_free_entry; entry++)
2070 {
2071 expressionS * exp = & pool->literals[entry].exp;
2072
2073 if (exp->X_op == O_big)
2074 {
2075 /* PR 16688: Restore the global bignum value. */
2076 gas_assert (pool->literals[entry].bignum != NULL);
2077 memcpy (generic_bignum, pool->literals[entry].bignum,
2078 CHARS_PER_LITTLENUM * exp->X_add_number);
2079 }
2080
2081 /* First output the expression in the instruction to the pool. */
2082 emit_expr (exp, size); /* .word|.xword */
2083
2084 if (exp->X_op == O_big)
2085 {
2086 free (pool->literals[entry].bignum);
2087 pool->literals[entry].bignum = NULL;
2088 }
2089 }
2090
2091 /* Mark the pool as empty. */
2092 pool->next_free_entry = 0;
2093 pool->symbol = NULL;
2094 }
2095 }
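/* Editorial example (constants and labels below are illustrative): the
   literal pool machinery above backs the "=immediate" form of LDR, e.g.

	ldr	x0, =0x1122334455667788	// entry added to the 8-byte pool
	ldr	x1, =0x1122334455667788	// duplicate, shares the same entry
	ret
	.ltorg				// pool dumped here, 8-byte aligned

   add_to_lit_pool records the constants and s_ltorg emits them at the
   ".ltorg" (or ".pool") directive, after which the pool is marked empty
   so a later pool can be started.  */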
2096
2097 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2098 /* Forward declarations for functions below, in the MD interface
2099 section. */
2100 static struct reloc_table_entry * find_reloc_table_entry (char **);
2101
2102 /* Directives: Data. */
2103 /* N.B. support for a relocation suffix in this directive still needs to
2104 be implemented properly. */
2105
2106 static void
2107 s_aarch64_cons (int nbytes)
2108 {
2109 expressionS exp;
2110
2111 #ifdef md_flush_pending_output
2112 md_flush_pending_output ();
2113 #endif
2114
2115 if (is_it_end_of_statement ())
2116 {
2117 demand_empty_rest_of_line ();
2118 return;
2119 }
2120
2121 #ifdef md_cons_align
2122 md_cons_align (nbytes);
2123 #endif
2124
2125 mapping_state (MAP_DATA);
2126 do
2127 {
2128 struct reloc_table_entry *reloc;
2129
2130 expression (&exp);
2131
2132 if (exp.X_op != O_symbol)
2133 emit_expr (&exp, (unsigned int) nbytes);
2134 else
2135 {
2136 skip_past_char (&input_line_pointer, '#');
2137 if (skip_past_char (&input_line_pointer, ':'))
2138 {
2139 reloc = find_reloc_table_entry (&input_line_pointer);
2140 if (reloc == NULL)
2141 as_bad (_("unrecognized relocation suffix"));
2142 else
2143 as_bad (_("unimplemented relocation suffix"));
2144 ignore_rest_of_line ();
2145 return;
2146 }
2147 else
2148 emit_expr (&exp, (unsigned int) nbytes);
2149 }
2150 }
2151 while (*input_line_pointer++ == ',');
2152
2153 /* Put terminator back into stream. */
2154 input_line_pointer--;
2155 demand_empty_rest_of_line ();
2156 }
2157 #endif
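/* Editorial note: the directives registered for s_aarch64_cons accept
   ordinary expressions, e.g. (the symbol name is illustrative)

	.word	0x12345678		// 4 bytes
	.xword	some_symbol		// 8 bytes

   A relocation suffix after a symbolic expression is recognised but, as
   the code above shows, currently rejected as unimplemented.  */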
2158
2159 #ifdef OBJ_ELF
2160 /* Forward declarations for functions below, in the MD interface
2161 section. */
2162 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2163
2164 /* Mark symbol that it follows a variant PCS convention. */
2165
2166 static void
2167 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2168 {
2169 char *name;
2170 char c;
2171 symbolS *sym;
2172 asymbol *bfdsym;
2173 elf_symbol_type *elfsym;
2174
2175 c = get_symbol_name (&name);
2176 if (!*name)
2177 as_bad (_("Missing symbol name in directive"));
2178 sym = symbol_find_or_make (name);
2179 restore_line_pointer (c);
2180 demand_empty_rest_of_line ();
2181 bfdsym = symbol_get_bfdsym (sym);
2182 elfsym = elf_symbol_from (bfdsym);
2183 gas_assert (elfsym);
2184 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2185 }
2186 #endif /* OBJ_ELF */
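/* Editorial usage sketch for .variant_pcs (the symbol name is made up):

	.variant_pcs	__sve_helper
	.global		__sve_helper
   __sve_helper:
	...

   This sets STO_AARCH64_VARIANT_PCS on the symbol, telling the linker
   that the function follows a variant procedure-call standard.  */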
2187
2188 /* Output a 32-bit word, but mark as an instruction. */
2189
2190 static void
2191 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
2192 {
2193 expressionS exp;
2194 unsigned n = 0;
2195
2196 #ifdef md_flush_pending_output
2197 md_flush_pending_output ();
2198 #endif
2199
2200 if (is_it_end_of_statement ())
2201 {
2202 demand_empty_rest_of_line ();
2203 return;
2204 }
2205
2206 /* Sections are assumed to start aligned. In an executable section there
2207 is no MAP_DATA symbol pending, so we only align the address during the
2208 MAP_DATA --> MAP_INSN transition.
2209 For other sections, this is not guaranteed. */
2210 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2211 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2212 frag_align_code (2, 0);
2213
2214 #ifdef OBJ_ELF
2215 mapping_state (MAP_INSN);
2216 #endif
2217
2218 do
2219 {
2220 expression (&exp);
2221 if (exp.X_op != O_constant)
2222 {
2223 as_bad (_("constant expression required"));
2224 ignore_rest_of_line ();
2225 return;
2226 }
2227
2228 if (target_big_endian)
2229 {
2230 unsigned int val = exp.X_add_number;
2231 exp.X_add_number = SWAP_32 (val);
2232 }
2233 emit_expr (&exp, INSN_SIZE);
2234 ++n;
2235 }
2236 while (*input_line_pointer++ == ',');
2237
2238 dwarf2_emit_insn (n * INSN_SIZE);
2239
2240 /* Put terminator back into stream. */
2241 input_line_pointer--;
2242 demand_empty_rest_of_line ();
2243 }
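/* Editorial example: unlike .word, the .inst directive keeps the mapping
   state at MAP_INSN, so the emitted words are treated as code, e.g.

	.inst	0xd503201f		// encoding of NOP
	.inst	0xd65f03c0, 0xd503201f	// RET, NOP

   and a DWARF line entry is emitted for the emitted instructions.  */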
2244
2245 static void
2246 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2247 {
2248 demand_empty_rest_of_line ();
2249 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2250 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2251 }
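/* Editorial sketch of typical use (assuming pointer authentication with
   the B key):

	.cfi_startproc
	.cfi_b_key_frame
	pacibsp
	...
	retab
	.cfi_endproc

   The directive records in the FDE that return addresses in this frame
   are signed with the B key rather than the default A key.  */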
2252
2253 #ifdef OBJ_ELF
2254 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2255
2256 static void
2257 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2258 {
2259 expressionS exp;
2260
2261 expression (&exp);
2262 frag_grow (4);
2263 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2264 BFD_RELOC_AARCH64_TLSDESC_ADD);
2265
2266 demand_empty_rest_of_line ();
2267 }
2268
2269 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2270
2271 static void
2272 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2273 {
2274 expressionS exp;
2275
2276 /* Since we're just labelling the code, there's no need to define a
2277 mapping symbol. */
2278 expression (&exp);
2279 /* Make sure there is enough room in this frag for the following
2280 blr. This trick only works if the blr follows immediately after
2281 the .tlsdesc directive. */
2282 frag_grow (4);
2283 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2284 BFD_RELOC_AARCH64_TLSDESC_CALL);
2285
2286 demand_empty_rest_of_line ();
2287 }
2288
2289 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2290
2291 static void
2292 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2293 {
2294 expressionS exp;
2295
2296 expression (&exp);
2297 frag_grow (4);
2298 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2299 BFD_RELOC_AARCH64_TLSDESC_LDR);
2300
2301 demand_empty_rest_of_line ();
2302 }
2303 #endif /* OBJ_ELF */
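/* Editorial example of the canonical TLS descriptor sequence that the
   directives above annotate (the symbol name "var" is illustrative):

	adrp	x0, :tlsdesc:var
	ldr	x1, [x0, #:tlsdesc_lo12:var]
	add	x0, x0, :tlsdesc_lo12:var
	.tlsdesccall var
	blr	x1

   The BFD_RELOC_AARCH64_TLSDESC_CALL emitted by .tlsdesccall marks the
   BLR so the linker can relax the whole sequence to an IE or LE access
   when possible.  */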
2304
2305 #ifdef TE_PE
2306 static void
2307 s_secrel (int dummy ATTRIBUTE_UNUSED)
2308 {
2309 expressionS exp;
2310
2311 do
2312 {
2313 expression (&exp);
2314 if (exp.X_op == O_symbol)
2315 exp.X_op = O_secrel;
2316
2317 emit_expr (&exp, 4);
2318 }
2319 while (*input_line_pointer++ == ',');
2320
2321 input_line_pointer--;
2322 demand_empty_rest_of_line ();
2323 }
2324
2325 void
2326 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2327 {
2328 expressionS exp;
2329
2330 exp.X_op = O_secrel;
2331 exp.X_add_symbol = symbol;
2332 exp.X_add_number = 0;
2333 emit_expr (&exp, size);
2334 }
2335
2336 static void
2337 s_secidx (int dummy ATTRIBUTE_UNUSED)
2338 {
2339 expressionS exp;
2340
2341 do
2342 {
2343 expression (&exp);
2344 if (exp.X_op == O_symbol)
2345 exp.X_op = O_secidx;
2346
2347 emit_expr (&exp, 2);
2348 }
2349 while (*input_line_pointer++ == ',');
2350
2351 input_line_pointer--;
2352 demand_empty_rest_of_line ();
2353 }
2354 #endif /* TE_PE */
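/* Editorial note: on PE/COFF targets the directives above emit
   section-relative debug references, e.g. (the label is illustrative)

	.secrel32	.Ldebug_info0	// 32-bit offset within its section
	.secidx		.Ldebug_info0	// 16-bit section index

   which is the form expected by CodeView-style debug information.  */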
2355
2356 static void s_aarch64_arch (int);
2357 static void s_aarch64_cpu (int);
2358 static void s_aarch64_arch_extension (int);
2359
2360 /* This table describes all the machine specific pseudo-ops the assembler
2361 has to support. The fields are:
2362 pseudo-op name without dot
2363 function to call to execute this pseudo-op
2364 Integer arg to pass to the function. */
2365
2366 const pseudo_typeS md_pseudo_table[] = {
2367 /* Never called because '.req' does not start a line. */
2368 {"req", s_req, 0},
2369 {"unreq", s_unreq, 0},
2370 {"bss", s_bss, 0},
2371 {"even", s_even, 0},
2372 {"ltorg", s_ltorg, 0},
2373 {"pool", s_ltorg, 0},
2374 {"cpu", s_aarch64_cpu, 0},
2375 {"arch", s_aarch64_arch, 0},
2376 {"arch_extension", s_aarch64_arch_extension, 0},
2377 {"inst", s_aarch64_inst, 0},
2378 {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2379 #ifdef OBJ_ELF
2380 {"tlsdescadd", s_tlsdescadd, 0},
2381 {"tlsdesccall", s_tlsdesccall, 0},
2382 {"tlsdescldr", s_tlsdescldr, 0},
2383 {"variant_pcs", s_variant_pcs, 0},
2384 #endif
2385 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2386 {"word", s_aarch64_cons, 4},
2387 {"long", s_aarch64_cons, 4},
2388 {"xword", s_aarch64_cons, 8},
2389 {"dword", s_aarch64_cons, 8},
2390 #endif
2391 #ifdef TE_PE
2392 {"secrel32", s_secrel, 0},
2393 {"secidx", s_secidx, 0},
2394 #endif
2395 {"float16", float_cons, 'h'},
2396 {"bfloat16", float_cons, 'b'},
2397 {0, 0, 0}
2398 };
2399 \f
2400
2401 /* Check whether STR points to a register name followed by a comma or the
2402 end of line; REG_TYPE indicates which register types are checked
2403 against. Return TRUE if STR is such a register name; otherwise return
2404 FALSE. The function does not intend to produce any diagnostics, but since
2405 the register parser aarch64_reg_parse, which is called by this function,
2406 does produce diagnostics, we call clear_error to clear any diagnostics
2407 that may be generated by aarch64_reg_parse.
2408 Also, the function returns FALSE directly if there is any user error
2409 present at the function entry. This prevents the existing diagnostics
2410 state from being spoiled.
2411 The function currently serves parse_constant_immediate and
2412 parse_big_immediate only. */
2413 static bool
2414 reg_name_p (char *str, aarch64_reg_type reg_type)
2415 {
2416 const reg_entry *reg;
2417
2418 /* Prevent the diagnostics state from being spoiled. */
2419 if (error_p ())
2420 return false;
2421
2422 reg = aarch64_reg_parse (&str, reg_type, NULL);
2423
2424 /* Clear the parsing error that may be set by the reg parser. */
2425 clear_error ();
2426
2427 if (!reg)
2428 return false;
2429
2430 skip_whitespace (str);
2431 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2432 return true;
2433
2434 return false;
2435 }
2436
2437 /* Parser functions used exclusively in instruction operands. */
2438
2439 /* Parse an immediate expression which need not be constant.
2440
2441 To prevent the expression parser from pushing a register name
2442 into the symbol table as an undefined symbol, a check is first
2443 made to find out whether STR is a register of type REG_TYPE followed
2444 by a comma or the end of line. Return FALSE if STR is such a string. */
2445
2446 static bool
2447 parse_immediate_expression (char **str, expressionS *exp,
2448 aarch64_reg_type reg_type)
2449 {
2450 if (reg_name_p (*str, reg_type))
2451 {
2452 set_recoverable_error (_("immediate operand required"));
2453 return false;
2454 }
2455
2456 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2457
2458 if (exp->X_op == O_absent)
2459 {
2460 set_fatal_syntax_error (_("missing immediate expression"));
2461 return false;
2462 }
2463
2464 return true;
2465 }
2466
2467 /* Constant immediate-value read function for use in insn parsing.
2468 STR points to the beginning of the immediate (with the optional
2469 leading #); *VAL receives the value. REG_TYPE says which register
2470 names should be treated as registers rather than as symbolic immediates.
2471
2472 Return TRUE on success; otherwise return FALSE. */
2473
2474 static bool
2475 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2476 {
2477 expressionS exp;
2478
2479 if (! parse_immediate_expression (str, &exp, reg_type))
2480 return false;
2481
2482 if (exp.X_op != O_constant)
2483 {
2484 set_syntax_error (_("constant expression required"));
2485 return false;
2486 }
2487
2488 *val = exp.X_add_number;
2489 return true;
2490 }
2491
2492 static uint32_t
2493 encode_imm_float_bits (uint32_t imm)
2494 {
2495 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2496 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2497 }
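/* Worked example (editorial): 1.0f has the IEEE-754 single encoding
   0x3f800000.  Bits [25:19] of that are 0b1110000 and bit 31 is 0, so
   encode_imm_float_bits returns 0x70, the 8-bit "abcdefgh" immediate
   used by FMOV (immediate) for #1.0.  */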
2498
2499 /* Return TRUE if the single-precision floating-point value encoded in IMM
2500 can be expressed in the AArch64 8-bit signed floating-point format with
2501 3-bit exponent and normalized 4 bits of precision; in other words, the
2502 floating-point value must be expressible as
2503 (+/-) n / 16 * power (2, r)
2504 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2505
2506 static bool
2507 aarch64_imm_float_p (uint32_t imm)
2508 {
2509 /* If a single-precision floating-point value has the following bit
2510 pattern, it can be expressed in the AArch64 8-bit floating-point
2511 format:
2512
2513 3 32222222 2221111111111
2514 1 09876543 21098765432109876543210
2515 n Eeeeeexx xxxx0000000000000000000
2516
2517 where n, e and each x are either 0 or 1 independently, with
2518 E == ~ e. */
2519
2520 uint32_t pattern;
2521
2522 /* Prepare the pattern for 'Eeeeee'. */
2523 if (((imm >> 30) & 0x1) == 0)
2524 pattern = 0x3e000000;
2525 else
2526 pattern = 0x40000000;
2527
2528 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2529 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2530 }
2531
2532 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2533 as an IEEE float without any loss of precision. Store the value in
2534 *FPWORD if so. */
2535
2536 static bool
2537 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2538 {
2539 /* If a double-precision floating-point value has the following bit
2540 pattern, it can be expressed in a float:
2541
2542 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2543 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2544 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2545
2546 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2547 if Eeee_eeee != 1111_1111
2548
2549 where n, e, s and S are either 0 or 1 independently and where ~ is the
2550 inverse of E. */
2551
2552 uint32_t pattern;
2553 uint32_t high32 = imm >> 32;
2554 uint32_t low32 = imm;
2555
2556 /* Lower 29 bits need to be 0s. */
2557 if ((imm & 0x1fffffff) != 0)
2558 return false;
2559
2560 /* Prepare the pattern for 'Eeeeeeeee'. */
2561 if (((high32 >> 30) & 0x1) == 0)
2562 pattern = 0x38000000;
2563 else
2564 pattern = 0x40000000;
2565
2566 /* Check E~~~. */
2567 if ((high32 & 0x78000000) != pattern)
2568 return false;
2569
2570 /* Check Eeee_eeee != 1111_1111. */
2571 if ((high32 & 0x7ff00000) == 0x47f00000)
2572 return false;
2573
2574 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2575 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2576 | (low32 >> 29)); /* 3 S bits. */
2577 return true;
2578 }
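/* Worked example (editorial): the IEEE-754 double encoding of 1.0 is
   0x3ff0000000000000.  Its low 29 bits are zero and the exponent pattern
   matches, so the function succeeds and packs *FPWORD as 0x3f800000,
   the single-precision encoding of 1.0, with no loss of precision.  */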
2579
2580 /* Return true if we should treat OPERAND as a double-precision
2581 floating-point operand rather than a single-precision one. */
2582 static bool
2583 double_precision_operand_p (const aarch64_opnd_info *operand)
2584 {
2585 /* Check for unsuffixed SVE registers, which are allowed
2586 for LDR and STR but not in instructions that require an
2587 immediate. We get better error messages if we arbitrarily
2588 pick one size, parse the immediate normally, and then
2589 report the match failure in the normal way. */
2590 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2591 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2592 }
2593
2594 /* Parse a floating-point immediate. Return TRUE on success and return the
2595 value in *IMMED in the format of IEEE754 single-precision encoding.
2596 *CCP points to the start of the string; DP_P is TRUE when the immediate
2597 is expected to be in double-precision (N.B. this only matters when
2598 hexadecimal representation is involved). REG_TYPE says which register
2599 names should be treated as registers rather than as symbolic immediates.
2600
2601 This routine accepts any IEEE float; it is up to the callers to reject
2602 invalid ones. */
2603
2604 static bool
2605 parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
2606 aarch64_reg_type reg_type)
2607 {
2608 char *str = *ccp;
2609 char *fpnum;
2610 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2611 int64_t val = 0;
2612 unsigned fpword = 0;
2613 bool hex_p = false;
2614
2615 skip_past_char (&str, '#');
2616
2617 fpnum = str;
2618 skip_whitespace (fpnum);
2619
2620 if (startswith (fpnum, "0x"))
2621 {
2622 /* Support the hexadecimal representation of the IEEE754 encoding.
2623 Double-precision is expected when DP_P is TRUE, otherwise the
2624 representation should be in single-precision. */
2625 if (! parse_constant_immediate (&str, &val, reg_type))
2626 goto invalid_fp;
2627
2628 if (dp_p)
2629 {
2630 if (!can_convert_double_to_float (val, &fpword))
2631 goto invalid_fp;
2632 }
2633 else if ((uint64_t) val > 0xffffffff)
2634 goto invalid_fp;
2635 else
2636 fpword = val;
2637
2638 hex_p = true;
2639 }
2640 else if (reg_name_p (str, reg_type))
2641 {
2642 set_recoverable_error (_("immediate operand required"));
2643 return false;
2644 }
2645
2646 if (! hex_p)
2647 {
2648 int i;
2649
2650 if ((str = atof_ieee (str, 's', words)) == NULL)
2651 goto invalid_fp;
2652
2653 /* Our FP word must be 32 bits (single-precision FP). */
2654 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2655 {
2656 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2657 fpword |= words[i];
2658 }
2659 }
2660
2661 *immed = fpword;
2662 *ccp = str;
2663 return true;
2664
2665 invalid_fp:
2666 set_fatal_syntax_error (_("invalid floating-point constant"));
2667 return false;
2668 }
2669
2670 /* Less-generic immediate-value read function with the possibility of loading
2671 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2672 instructions.
2673
2674 To prevent the expression parser from pushing a register name into the
2675 symbol table as an undefined symbol, a check is first made to find
2676 out whether STR is a register of type REG_TYPE followed by a comma or
2677 the end of line. Return FALSE if STR is such a register. */
2678
2679 static bool
2680 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2681 {
2682 char *ptr = *str;
2683
2684 if (reg_name_p (ptr, reg_type))
2685 {
2686 set_syntax_error (_("immediate operand required"));
2687 return false;
2688 }
2689
2690 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2691
2692 if (inst.reloc.exp.X_op == O_constant)
2693 *imm = inst.reloc.exp.X_add_number;
2694
2695 *str = ptr;
2696
2697 return true;
2698 }
2699
2700 /* Record in *RELOC that OPERAND needs a GAS internal fixup.
2701 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2702 assistance from libopcodes. */
2703
2704 static inline void
2705 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2706 const aarch64_opnd_info *operand,
2707 int need_libopcodes_p)
2708 {
2709 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2710 reloc->opnd = operand->type;
2711 if (need_libopcodes_p)
2712 reloc->need_libopcodes_p = 1;
2713 }
2714
2715 /* Return TRUE if the instruction needs to be fixed up later internally by
2716 the GAS; otherwise return FALSE. */
2717
2718 static inline bool
2719 aarch64_gas_internal_fixup_p (void)
2720 {
2721 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2722 }
2723
2724 /* Assign the immediate value to the relevant field in *OPERAND if
2725 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2726 needs an internal fixup in a later stage.
2727 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2728 IMM.VALUE that may get assigned with the constant. */
2729 static inline void
2730 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2731 aarch64_opnd_info *operand,
2732 int addr_off_p,
2733 int need_libopcodes_p,
2734 int skip_p)
2735 {
2736 if (reloc->exp.X_op == O_constant)
2737 {
2738 if (addr_off_p)
2739 operand->addr.offset.imm = reloc->exp.X_add_number;
2740 else
2741 operand->imm.value = reloc->exp.X_add_number;
2742 reloc->type = BFD_RELOC_UNUSED;
2743 }
2744 else
2745 {
2746 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2747 /* Tell libopcodes to ignore this operand or not. This is helpful
2748 when one of the operands needs to be fixed up later but we need
2749 libopcodes to check the other operands. */
2750 operand->skip = skip_p;
2751 }
2752 }
2753
2754 /* Relocation modifiers. Each entry in the table contains the textual
2755 name for the relocation which may be placed before a symbol used as
2756 a load/store offset, or add immediate. It must be surrounded by a
2757 leading and trailing colon, for example:
2758
2759 ldr x0, [x1, #:rello:varsym]
2760 add x0, x1, #:rello:varsym */
2761
2762 struct reloc_table_entry
2763 {
2764 const char *name;
2765 int pc_rel;
2766 bfd_reloc_code_real_type adr_type;
2767 bfd_reloc_code_real_type adrp_type;
2768 bfd_reloc_code_real_type movw_type;
2769 bfd_reloc_code_real_type add_type;
2770 bfd_reloc_code_real_type ldst_type;
2771 bfd_reloc_code_real_type ld_literal_type;
2772 };
2773
2774 static struct reloc_table_entry reloc_table[] =
2775 {
2776 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2777 {"lo12", 0,
2778 0, /* adr_type */
2779 0,
2780 0,
2781 BFD_RELOC_AARCH64_ADD_LO12,
2782 BFD_RELOC_AARCH64_LDST_LO12,
2783 0},
2784
2785 /* Higher 21 bits of pc-relative page offset: ADRP */
2786 {"pg_hi21", 1,
2787 0, /* adr_type */
2788 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2789 0,
2790 0,
2791 0,
2792 0},
2793
2794 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2795 {"pg_hi21_nc", 1,
2796 0, /* adr_type */
2797 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2798 0,
2799 0,
2800 0,
2801 0},
2802
2803 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2804 {"abs_g0", 0,
2805 0, /* adr_type */
2806 0,
2807 BFD_RELOC_AARCH64_MOVW_G0,
2808 0,
2809 0,
2810 0},
2811
2812 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2813 {"abs_g0_s", 0,
2814 0, /* adr_type */
2815 0,
2816 BFD_RELOC_AARCH64_MOVW_G0_S,
2817 0,
2818 0,
2819 0},
2820
2821 /* Less significant bits 0-15 of address/value: MOVK, no check */
2822 {"abs_g0_nc", 0,
2823 0, /* adr_type */
2824 0,
2825 BFD_RELOC_AARCH64_MOVW_G0_NC,
2826 0,
2827 0,
2828 0},
2829
2830 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2831 {"abs_g1", 0,
2832 0, /* adr_type */
2833 0,
2834 BFD_RELOC_AARCH64_MOVW_G1,
2835 0,
2836 0,
2837 0},
2838
2839 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2840 {"abs_g1_s", 0,
2841 0, /* adr_type */
2842 0,
2843 BFD_RELOC_AARCH64_MOVW_G1_S,
2844 0,
2845 0,
2846 0},
2847
2848 /* Less significant bits 16-31 of address/value: MOVK, no check */
2849 {"abs_g1_nc", 0,
2850 0, /* adr_type */
2851 0,
2852 BFD_RELOC_AARCH64_MOVW_G1_NC,
2853 0,
2854 0,
2855 0},
2856
2857 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2858 {"abs_g2", 0,
2859 0, /* adr_type */
2860 0,
2861 BFD_RELOC_AARCH64_MOVW_G2,
2862 0,
2863 0,
2864 0},
2865
2866 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2867 {"abs_g2_s", 0,
2868 0, /* adr_type */
2869 0,
2870 BFD_RELOC_AARCH64_MOVW_G2_S,
2871 0,
2872 0,
2873 0},
2874
2875 /* Less significant bits 32-47 of address/value: MOVK, no check */
2876 {"abs_g2_nc", 0,
2877 0, /* adr_type */
2878 0,
2879 BFD_RELOC_AARCH64_MOVW_G2_NC,
2880 0,
2881 0,
2882 0},
2883
2884 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2885 {"abs_g3", 0,
2886 0, /* adr_type */
2887 0,
2888 BFD_RELOC_AARCH64_MOVW_G3,
2889 0,
2890 0,
2891 0},
2892
2893 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2894 {"prel_g0", 1,
2895 0, /* adr_type */
2896 0,
2897 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2898 0,
2899 0,
2900 0},
2901
2902 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2903 {"prel_g0_nc", 1,
2904 0, /* adr_type */
2905 0,
2906 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2907 0,
2908 0,
2909 0},
2910
2911 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2912 {"prel_g1", 1,
2913 0, /* adr_type */
2914 0,
2915 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2916 0,
2917 0,
2918 0},
2919
2920 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2921 {"prel_g1_nc", 1,
2922 0, /* adr_type */
2923 0,
2924 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2925 0,
2926 0,
2927 0},
2928
2929 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2930 {"prel_g2", 1,
2931 0, /* adr_type */
2932 0,
2933 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2934 0,
2935 0,
2936 0},
2937
2938 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2939 {"prel_g2_nc", 1,
2940 0, /* adr_type */
2941 0,
2942 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2943 0,
2944 0,
2945 0},
2946
2947 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2948 {"prel_g3", 1,
2949 0, /* adr_type */
2950 0,
2951 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2952 0,
2953 0,
2954 0},
2955
2956 /* Get to the page containing GOT entry for a symbol. */
2957 {"got", 1,
2958 0, /* adr_type */
2959 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2960 0,
2961 0,
2962 0,
2963 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2964
2965 /* 12 bit offset into the page containing GOT entry for that symbol. */
2966 {"got_lo12", 0,
2967 0, /* adr_type */
2968 0,
2969 0,
2970 0,
2971 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2972 0},
2973
2974 /* Bits 0-15 of address/value: MOVK, no check. */
2975 {"gotoff_g0_nc", 0,
2976 0, /* adr_type */
2977 0,
2978 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2979 0,
2980 0,
2981 0},
2982
2983 /* Most significant bits 16-31 of address/value: MOVZ. */
2984 {"gotoff_g1", 0,
2985 0, /* adr_type */
2986 0,
2987 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2988 0,
2989 0,
2990 0},
2991
2992 /* 15 bit offset into the page containing GOT entry for that symbol. */
2993 {"gotoff_lo15", 0,
2994 0, /* adr_type */
2995 0,
2996 0,
2997 0,
2998 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2999 0},
3000
3001 /* Get to the page containing GOT TLS entry for a symbol */
3002 {"gottprel_g0_nc", 0,
3003 0, /* adr_type */
3004 0,
3005 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
3006 0,
3007 0,
3008 0},
3009
3010 /* Get to the page containing GOT TLS entry for a symbol */
3011 {"gottprel_g1", 0,
3012 0, /* adr_type */
3013 0,
3014 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
3015 0,
3016 0,
3017 0},
3018
3019 /* Get to the page containing GOT TLS entry for a symbol */
3020 {"tlsgd", 0,
3021 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
3022 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
3023 0,
3024 0,
3025 0,
3026 0},
3027
3028 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3029 {"tlsgd_lo12", 0,
3030 0, /* adr_type */
3031 0,
3032 0,
3033 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
3034 0,
3035 0},
3036
3037 /* Lower 16 bits of address/value: MOVK. */
3038 {"tlsgd_g0_nc", 0,
3039 0, /* adr_type */
3040 0,
3041 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
3042 0,
3043 0,
3044 0},
3045
3046 /* Most significant bits 16-31 of address/value: MOVZ. */
3047 {"tlsgd_g1", 0,
3048 0, /* adr_type */
3049 0,
3050 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
3051 0,
3052 0,
3053 0},
3054
3055 /* Get to the page containing GOT TLS entry for a symbol */
3056 {"tlsdesc", 0,
3057 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
3058 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
3059 0,
3060 0,
3061 0,
3062 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
3063
3064 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3065 {"tlsdesc_lo12", 0,
3066 0, /* adr_type */
3067 0,
3068 0,
3069 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
3070 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
3071 0},
3072
3073 /* Get to the page containing GOT TLS entry for a symbol.
3074 As with GD, we allocate two consecutive GOT slots
3075 for the module index and module offset; the only difference
3076 from GD is that the module offset should be initialized to
3077 zero without any outstanding runtime relocation. */
3078 {"tlsldm", 0,
3079 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
3080 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
3081 0,
3082 0,
3083 0,
3084 0},
3085
3086 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3087 {"tlsldm_lo12_nc", 0,
3088 0, /* adr_type */
3089 0,
3090 0,
3091 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
3092 0,
3093 0},
3094
3095 /* 12 bit offset into the module TLS base address. */
3096 {"dtprel_lo12", 0,
3097 0, /* adr_type */
3098 0,
3099 0,
3100 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
3101 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
3102 0},
3103
3104 /* Same as dtprel_lo12, no overflow check. */
3105 {"dtprel_lo12_nc", 0,
3106 0, /* adr_type */
3107 0,
3108 0,
3109 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
3110 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
3111 0},
3112
3113 /* bits[23:12] of offset to the module TLS base address. */
3114 {"dtprel_hi12", 0,
3115 0, /* adr_type */
3116 0,
3117 0,
3118 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
3119 0,
3120 0},
3121
3122 /* bits[15:0] of offset to the module TLS base address. */
3123 {"dtprel_g0", 0,
3124 0, /* adr_type */
3125 0,
3126 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3127 0,
3128 0,
3129 0},
3130
3131 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3132 {"dtprel_g0_nc", 0,
3133 0, /* adr_type */
3134 0,
3135 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3136 0,
3137 0,
3138 0},
3139
3140 /* bits[31:16] of offset to the module TLS base address. */
3141 {"dtprel_g1", 0,
3142 0, /* adr_type */
3143 0,
3144 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3145 0,
3146 0,
3147 0},
3148
3149 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3150 {"dtprel_g1_nc", 0,
3151 0, /* adr_type */
3152 0,
3153 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3154 0,
3155 0,
3156 0},
3157
3158 /* bits[47:32] of offset to the module TLS base address. */
3159 {"dtprel_g2", 0,
3160 0, /* adr_type */
3161 0,
3162 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3163 0,
3164 0,
3165 0},
3166
3167 /* Lower 16 bit offset into GOT entry for a symbol */
3168 {"tlsdesc_off_g0_nc", 0,
3169 0, /* adr_type */
3170 0,
3171 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3172 0,
3173 0,
3174 0},
3175
3176 /* Higher 16 bit offset into GOT entry for a symbol */
3177 {"tlsdesc_off_g1", 0,
3178 0, /* adr_type */
3179 0,
3180 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3181 0,
3182 0,
3183 0},
3184
3185 /* Get to the page containing GOT TLS entry for a symbol */
3186 {"gottprel", 0,
3187 0, /* adr_type */
3188 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3189 0,
3190 0,
3191 0,
3192 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3193
3194 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3195 {"gottprel_lo12", 0,
3196 0, /* adr_type */
3197 0,
3198 0,
3199 0,
3200 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3201 0},
3202
3203 /* Get tp offset for a symbol. */
3204 {"tprel", 0,
3205 0, /* adr_type */
3206 0,
3207 0,
3208 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3209 0,
3210 0},
3211
3212 /* Get tp offset for a symbol. */
3213 {"tprel_lo12", 0,
3214 0, /* adr_type */
3215 0,
3216 0,
3217 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3218 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3219 0},
3220
3221 /* Get tp offset for a symbol. */
3222 {"tprel_hi12", 0,
3223 0, /* adr_type */
3224 0,
3225 0,
3226 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3227 0,
3228 0},
3229
3230 /* Get tp offset for a symbol. */
3231 {"tprel_lo12_nc", 0,
3232 0, /* adr_type */
3233 0,
3234 0,
3235 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3236 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3237 0},
3238
3239 /* Most significant bits 32-47 of address/value: MOVZ. */
3240 {"tprel_g2", 0,
3241 0, /* adr_type */
3242 0,
3243 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3244 0,
3245 0,
3246 0},
3247
3248 /* Most significant bits 16-31 of address/value: MOVZ. */
3249 {"tprel_g1", 0,
3250 0, /* adr_type */
3251 0,
3252 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3253 0,
3254 0,
3255 0},
3256
3257 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3258 {"tprel_g1_nc", 0,
3259 0, /* adr_type */
3260 0,
3261 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3262 0,
3263 0,
3264 0},
3265
3266 /* Most significant bits 0-15 of address/value: MOVZ. */
3267 {"tprel_g0", 0,
3268 0, /* adr_type */
3269 0,
3270 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3271 0,
3272 0,
3273 0},
3274
3275 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3276 {"tprel_g0_nc", 0,
3277 0, /* adr_type */
3278 0,
3279 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3280 0,
3281 0,
3282 0},
3283
3284 /* 15-bit offset from GOT entry to base address of GOT table. */
3285 {"gotpage_lo15", 0,
3286 0,
3287 0,
3288 0,
3289 0,
3290 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3291 0},
3292
3293 /* 14-bit offset from GOT entry to base address of GOT table. */
3294 {"gotpage_lo14", 0,
3295 0,
3296 0,
3297 0,
3298 0,
3299 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3300 0},
3301 };
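/* Editorial examples of how some of the modifiers above appear in
   assembler source (the symbol name "sym" is illustrative):

	movz	x0, #:abs_g3:sym		// bits 48-63
	movk	x0, #:abs_g2_nc:sym		// bits 32-47
	movk	x0, #:abs_g1_nc:sym		// bits 16-31
	movk	x0, #:abs_g0_nc:sym		// bits 0-15

	adrp	x1, :got:sym			// page of the GOT slot
	ldr	x1, [x1, #:got_lo12:sym]	// load the GOT entry

   The table entries select the MOVW, ADRP, ADD or LDST relocation as
   appropriate for the instruction being assembled.  */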
3302
3303 /* Given the address of a pointer pointing to the textual name of a
3304 relocation as may appear in assembler source, attempt to find its
3305 details in reloc_table. The pointer will be updated to the character
3306 after the trailing colon. On failure, NULL will be returned;
3307 otherwise return the reloc_table_entry. */
3308
3309 static struct reloc_table_entry *
3310 find_reloc_table_entry (char **str)
3311 {
3312 unsigned int i;
3313 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3314 {
3315 int length = strlen (reloc_table[i].name);
3316
3317 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3318 && (*str)[length] == ':')
3319 {
3320 *str += (length + 1);
3321 return &reloc_table[i];
3322 }
3323 }
3324
3325 return NULL;
3326 }
3327
3328 /* Returns 0 if the relocation should never be forced,
3329 1 if the relocation must be forced, and -1 if either
3330 result is OK. */
3331
3332 static signed int
3333 aarch64_force_reloc (unsigned int type)
3334 {
3335 switch (type)
3336 {
3337 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
3338 /* Perform these "immediate" internal relocations
3339 even if the symbol is extern or weak. */
3340 return 0;
3341
3342 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
3343 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
3344 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
3345 /* Pseudo relocs that need to be fixed up according to
3346 ilp32_p. */
3347 return 1;
3348
3349 case BFD_RELOC_AARCH64_ADD_LO12:
3350 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3351 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
3352 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
3353 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3354 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3355 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
3356 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
3357 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
3358 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3359 case BFD_RELOC_AARCH64_LDST128_LO12:
3360 case BFD_RELOC_AARCH64_LDST16_LO12:
3361 case BFD_RELOC_AARCH64_LDST32_LO12:
3362 case BFD_RELOC_AARCH64_LDST64_LO12:
3363 case BFD_RELOC_AARCH64_LDST8_LO12:
3364 case BFD_RELOC_AARCH64_LDST_LO12:
3365 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
3366 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3367 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3368 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3369 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
3370 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3371 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
3372 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
3373 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3374 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3375 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3376 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
3377 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
3378 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3379 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3380 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3381 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3382 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
3383 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
3384 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
3385 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
3386 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
3387 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
3388 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
3389 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
3390 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
3391 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
3392 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
3393 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
3394 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
3395 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
3396 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
3397 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
3398 case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
3399 case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
3400 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
3401 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
3402 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
3403 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
3404 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
3405 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
3406 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
3407 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
3408 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
3409 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
3410 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
3411 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
3412 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
3413 case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
3414 case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
3415 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3416 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3417 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3418 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3419 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3420 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3421 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3422 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3423 /* Always leave these relocations for the linker. */
3424 return 1;
3425
3426 default:
3427 return -1;
3428 }
3429 }
3430
3431 int
3432 aarch64_force_relocation (struct fix *fixp)
3433 {
3434 int res = aarch64_force_reloc (fixp->fx_r_type);
3435
3436 if (res == -1)
3437 return generic_force_reloc (fixp);
3438 return res;
3439 }
3440
3441 /* Mode argument to parse_shift and parse_shifter_operand. */
3442 enum parse_shift_mode
3443 {
3444 SHIFTED_NONE, /* no shifter allowed */
3445 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3446 "#imm{,lsl #n}" */
3447 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
3448 "#imm" */
3449 SHIFTED_LSL, /* bare "lsl #n" */
3450 SHIFTED_MUL, /* bare "mul #n" */
3451 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
3452 SHIFTED_MUL_VL, /* "mul vl" */
3453 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
3454 };
3455
3456 /* Parse a <shift> operator on an AArch64 data processing instruction.
3457 Return TRUE on success; otherwise return FALSE. */
3458 static bool
3459 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3460 {
3461 const struct aarch64_name_value_pair *shift_op;
3462 enum aarch64_modifier_kind kind;
3463 expressionS exp;
3464 int exp_has_prefix;
3465 char *s = *str;
3466 char *p = s;
3467
3468 for (p = *str; ISALPHA (*p); p++)
3469 ;
3470
3471 if (p == *str)
3472 {
3473 set_syntax_error (_("shift expression expected"));
3474 return false;
3475 }
3476
3477 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3478
3479 if (shift_op == NULL)
3480 {
3481 set_syntax_error (_("shift operator expected"));
3482 return false;
3483 }
3484
3485 kind = aarch64_get_operand_modifier (shift_op);
3486
3487 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3488 {
3489 set_syntax_error (_("invalid use of 'MSL'"));
3490 return false;
3491 }
3492
3493 if (kind == AARCH64_MOD_MUL
3494 && mode != SHIFTED_MUL
3495 && mode != SHIFTED_MUL_VL)
3496 {
3497 set_syntax_error (_("invalid use of 'MUL'"));
3498 return false;
3499 }
3500
3501 switch (mode)
3502 {
3503 case SHIFTED_LOGIC_IMM:
3504 if (aarch64_extend_operator_p (kind))
3505 {
3506 set_syntax_error (_("extending shift is not permitted"));
3507 return false;
3508 }
3509 break;
3510
3511 case SHIFTED_ARITH_IMM:
3512 if (kind == AARCH64_MOD_ROR)
3513 {
3514 set_syntax_error (_("'ROR' shift is not permitted"));
3515 return false;
3516 }
3517 break;
3518
3519 case SHIFTED_LSL:
3520 if (kind != AARCH64_MOD_LSL)
3521 {
3522 set_syntax_error (_("only 'LSL' shift is permitted"));
3523 return false;
3524 }
3525 break;
3526
3527 case SHIFTED_MUL:
3528 if (kind != AARCH64_MOD_MUL)
3529 {
3530 set_syntax_error (_("only 'MUL' is permitted"));
3531 return false;
3532 }
3533 break;
3534
3535 case SHIFTED_MUL_VL:
3536 /* "MUL VL" consists of two separate tokens. Require the first
3537 token to be "MUL" and look for a following "VL". */
3538 if (kind == AARCH64_MOD_MUL)
3539 {
3540 skip_whitespace (p);
3541 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3542 {
3543 p += 2;
3544 kind = AARCH64_MOD_MUL_VL;
3545 break;
3546 }
3547 }
3548 set_syntax_error (_("only 'MUL VL' is permitted"));
3549 return false;
3550
3551 case SHIFTED_REG_OFFSET:
3552 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3553 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3554 {
3555 set_fatal_syntax_error
3556 (_("invalid shift for the register offset addressing mode"));
3557 return false;
3558 }
3559 break;
3560
3561 case SHIFTED_LSL_MSL:
3562 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3563 {
3564 set_syntax_error (_("invalid shift operator"));
3565 return false;
3566 }
3567 break;
3568
3569 default:
3570 abort ();
3571 }
3572
3573 /* Whitespace can appear here if the next thing is a bare digit. */
3574 skip_whitespace (p);
3575
3576 /* Parse shift amount. */
3577 exp_has_prefix = 0;
3578 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3579 exp.X_op = O_absent;
3580 else
3581 {
3582 if (is_immediate_prefix (*p))
3583 {
3584 p++;
3585 exp_has_prefix = 1;
3586 }
3587 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3588 }
3589 if (kind == AARCH64_MOD_MUL_VL)
3590 /* For consistency, give MUL VL the same shift amount as an implicit
3591 MUL #1. */
3592 operand->shifter.amount = 1;
3593 else if (exp.X_op == O_absent)
3594 {
3595 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3596 {
3597 set_syntax_error (_("missing shift amount"));
3598 return false;
3599 }
3600 operand->shifter.amount = 0;
3601 }
3602 else if (exp.X_op != O_constant)
3603 {
3604 set_syntax_error (_("constant shift amount required"));
3605 return false;
3606 }
3607 /* For parsing purposes, MUL #n has no inherent range. The range
3608 depends on the operand and will be checked by operand-specific
3609 routines. */
3610 else if (kind != AARCH64_MOD_MUL
3611 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3612 {
3613 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3614 return false;
3615 }
3616 else
3617 {
3618 operand->shifter.amount = exp.X_add_number;
3619 operand->shifter.amount_present = 1;
3620 }
3621
3622 operand->shifter.operator_present = 1;
3623 operand->shifter.kind = kind;
3624
3625 *str = p;
3626 return true;
3627 }
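/* Editorial examples (loosely matched to the modes above) of operands
   this function parses:

	add	x0, x1, x2, lsl #3	// shifted register
	add	x0, x1, #20, lsl #12	// shifted arithmetic immediate
	ldr	x0, [x1, w2, uxtw #3]	// extended register offset
	ldr	z0, [x0, #2, mul vl]	// SVE "MUL VL" scaling

   The exact set of operators accepted depends on the parse_shift_mode
   passed by the caller.  */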
3628
3629 /* Parse a <shifter_operand> for a data processing instruction:
3630
3631 #<immediate>
3632 #<immediate>, LSL #imm
3633
3634 Validation of immediate operands is deferred to md_apply_fix.
3635
3636 Return TRUE on success; otherwise return FALSE. */
3637
3638 static bool
3639 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3640 enum parse_shift_mode mode)
3641 {
3642 char *p;
3643
3644 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3645 return false;
3646
3647 p = *str;
3648
3649 /* Accept an immediate expression. */
3650 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3651 REJECT_ABSENT))
3652 return false;
3653
3654 /* Accept optional LSL for arithmetic immediate values. */
3655 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3656 if (! parse_shift (&p, operand, SHIFTED_LSL))
3657 return false;
3658
3659 /* Do not accept any shifter for logical immediate values. */
3660 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3661 && parse_shift (&p, operand, mode))
3662 {
3663 set_syntax_error (_("unexpected shift operator"));
3664 return false;
3665 }
3666
3667 *str = p;
3668 return true;
3669 }
3670
3671 /* Parse a <shifter_operand> for a data processing instruction:
3672
3673 <Rm>
3674 <Rm>, <shift>
3675 #<immediate>
3676 #<immediate>, LSL #imm
3677
3678 where <shift> is handled by parse_shift above, and the last two
3679 cases are handled by the function above.
3680
3681 Validation of immediate operands is deferred to md_apply_fix.
3682
3683 Return TRUE on success; otherwise return FALSE. */
3684
3685 static bool
3686 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3687 enum parse_shift_mode mode)
3688 {
3689 const reg_entry *reg;
3690 aarch64_opnd_qualifier_t qualifier;
3691 enum aarch64_operand_class opd_class
3692 = aarch64_get_operand_class (operand->type);
3693
3694 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3695 if (reg)
3696 {
3697 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3698 {
3699 set_syntax_error (_("unexpected register in the immediate operand"));
3700 return false;
3701 }
3702
3703 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3704 {
3705 set_expected_reg_error (REG_TYPE_R_Z, reg, 0);
3706 return false;
3707 }
3708
3709 operand->reg.regno = reg->number;
3710 operand->qualifier = qualifier;
3711
3712 /* Accept optional shift operation on register. */
3713 if (! skip_past_comma (str))
3714 return true;
3715
3716 if (! parse_shift (str, operand, mode))
3717 return false;
3718
3719 return true;
3720 }
3721 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3722 {
3723 set_syntax_error
3724 (_("integer register expected in the extended/shifted operand "
3725 "register"));
3726 return false;
3727 }
3728
3729 /* We have a shifted immediate variable. */
3730 return parse_shifter_operand_imm (str, operand, mode);
3731 }
3732
3733 /* Return TRUE on success; return FALSE otherwise. */
3734
3735 static bool
3736 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3737 enum parse_shift_mode mode)
3738 {
3739 char *p = *str;
3740
3741 /* Determine if we have the sequence of characters #: or just :
3742 coming next. If we do, then we check for a :rello: relocation
3743 modifier. If we don't, punt the whole lot to
3744 parse_shifter_operand. */
3745
3746 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3747 {
3748 struct reloc_table_entry *entry;
3749
3750 if (p[0] == '#')
3751 p += 2;
3752 else
3753 p++;
3754 *str = p;
3755
3756 /* Try to parse a relocation. Anything else is an error. */
3757 if (!(entry = find_reloc_table_entry (str)))
3758 {
3759 set_syntax_error (_("unknown relocation modifier"));
3760 return false;
3761 }
3762
3763 if (entry->add_type == 0)
3764 {
3765 set_syntax_error
3766 (_("this relocation modifier is not allowed on this instruction"));
3767 return false;
3768 }
3769
3770 /* Save str before we decompose it. */
3771 p = *str;
3772
3773 /* Next, we parse the expression. */
3774 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3775 REJECT_ABSENT))
3776 return false;
3777
3778 /* Record the relocation type (use the ADD variant here). */
3779 inst.reloc.type = entry->add_type;
3780 inst.reloc.pc_rel = entry->pc_rel;
3781
3782 /* If str is empty, we've reached the end; stop here. */
3783 if (**str == '\0')
3784 return true;
3785
3786 /* Otherwise, we have a shifted reloc modifier, so rewind to
3787 recover the variable name and continue parsing for the shifter. */
3788 *str = p;
3789 return parse_shifter_operand_imm (str, operand, mode);
3790 }
3791
3792 return parse_shifter_operand (str, operand, mode);
3793 }
3794
3795 /* Parse all forms of an address expression. Information is written
3796 to *OPERAND and/or inst.reloc.
3797
3798 The A64 instruction set has the following addressing modes:
3799
3800 Offset
3801 [base] // in SIMD ld/st structure
3802 [base{,#0}] // in ld/st exclusive
3803 [base{,#imm}]
3804 [base,Xm{,LSL #imm}]
3805 [base,Xm,SXTX {#imm}]
3806 [base,Wm,(S|U)XTW {#imm}]
3807 Pre-indexed
3808 [base]! // in ldraa/ldrab exclusive
3809 [base,#imm]!
3810 Post-indexed
3811 [base],#imm
3812 [base],Xm // in SIMD ld/st structure
3813 PC-relative (literal)
3814 label
3815 SVE:
3816 [base,#imm,MUL VL]
3817 [base,Zm.D{,LSL #imm}]
3818 [base,Zm.S,(S|U)XTW {#imm}]
3819 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3820 [Zn.S,#imm]
3821 [Zn.D,#imm]
3822 [Zn.S{, Xm}]
3823 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3824 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3825 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3826
3827 (As a convenience, the notation "=immediate" is permitted in conjunction
3828 with the pc-relative literal load instructions to automatically place an
3829 immediate value or symbolic address in a nearby literal pool and generate
3830 a hidden label which references it.)
3831
3832 Upon a successful parsing, the address structure in *OPERAND will be
3833 filled in the following way:
3834
3835 .base_regno = <base>
3836 .offset.is_reg // 1 if the offset is a register
3837 .offset.imm = <imm>
3838 .offset.regno = <Rm>
3839
3840 For different addressing modes defined in the A64 ISA:
3841
3842 Offset
3843 .pcrel=0; .preind=1; .postind=0; .writeback=0
3844 Pre-indexed
3845 .pcrel=0; .preind=1; .postind=0; .writeback=1
3846 Post-indexed
3847 .pcrel=0; .preind=0; .postind=1; .writeback=1
3848 PC-relative (literal)
3849 .pcrel=1; .preind=1; .postind=0; .writeback=0
3850
3851 The shift/extension information, if any, will be stored in .shifter.
3852 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3853 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3854 corresponding register.
3855
3856 BASE_TYPE says which types of base register should be accepted and
3857 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3858 is the type of shifter that is allowed for immediate offsets,
3859 or SHIFTED_NONE if none.
3860
3861 In all other respects, it is the caller's responsibility to check
3862 for addressing modes not supported by the instruction, and to set
3863 inst.reloc.type. */
3864
3865 static bool
3866 parse_address_main (char **str, aarch64_opnd_info *operand,
3867 aarch64_opnd_qualifier_t *base_qualifier,
3868 aarch64_opnd_qualifier_t *offset_qualifier,
3869 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3870 enum parse_shift_mode imm_shift_mode)
3871 {
3872 char *p = *str;
3873 const reg_entry *reg;
3874 expressionS *exp = &inst.reloc.exp;
3875
3876 *base_qualifier = AARCH64_OPND_QLF_NIL;
3877 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3878 if (! skip_past_char (&p, '['))
3879 {
3880 /* =immediate or label. */
3881 operand->addr.pcrel = 1;
3882 operand->addr.preind = 1;
3883
3884 /* #:<reloc_op>:<symbol> */
3885 skip_past_char (&p, '#');
3886 if (skip_past_char (&p, ':'))
3887 {
3888 bfd_reloc_code_real_type ty;
3889 struct reloc_table_entry *entry;
3890
3891 /* Try to parse a relocation modifier. Anything else is
3892 an error. */
3893 entry = find_reloc_table_entry (&p);
3894 if (! entry)
3895 {
3896 set_syntax_error (_("unknown relocation modifier"));
3897 return false;
3898 }
3899
3900 switch (operand->type)
3901 {
3902 case AARCH64_OPND_ADDR_PCREL21:
3903 /* adr */
3904 ty = entry->adr_type;
3905 break;
3906
3907 default:
3908 ty = entry->ld_literal_type;
3909 break;
3910 }
3911
3912 if (ty == 0)
3913 {
3914 set_syntax_error
3915 (_("this relocation modifier is not allowed on this "
3916 "instruction"));
3917 return false;
3918 }
3919
3920 /* #:<reloc_op>: */
3921 if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
3922 {
3923 set_syntax_error (_("invalid relocation expression"));
3924 return false;
3925 }
3926 /* #:<reloc_op>:<expr> */
3927 /* Record the relocation type. */
3928 inst.reloc.type = ty;
3929 inst.reloc.pc_rel = entry->pc_rel;
3930 }
3931 else
3932 {
3933 if (skip_past_char (&p, '='))
3934 /* =immediate; need to generate the literal in the literal pool. */
3935 inst.gen_lit_pool = 1;
3936
3937 if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
3938 {
3939 set_syntax_error (_("invalid address"));
3940 return false;
3941 }
3942 }
3943
3944 *str = p;
3945 return true;
3946 }
3947
3948 /* [ */
3949
3950 bool alpha_base_p = ISALPHA (*p);
3951 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3952 if (!reg || !aarch64_check_reg_type (reg, base_type))
3953 {
3954 if (reg
3955 && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
3956 && *base_qualifier == AARCH64_OPND_QLF_W)
3957 set_syntax_error (_("expected a 64-bit base register"));
3958 else if (alpha_base_p)
3959 set_syntax_error (_("invalid base register"));
3960 else
3961 set_syntax_error (_("expected a base register"));
3962 return false;
3963 }
3964 operand->addr.base_regno = reg->number;
3965
3966 /* [Xn */
3967 if (skip_past_comma (&p))
3968 {
3969 /* [Xn, */
3970 operand->addr.preind = 1;
3971
3972 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3973 if (reg)
3974 {
3975 if (!aarch64_check_reg_type (reg, offset_type))
3976 {
3977 set_syntax_error (_("invalid offset register"));
3978 return false;
3979 }
3980
3981 /* [Xn,Rm */
3982 operand->addr.offset.regno = reg->number;
3983 operand->addr.offset.is_reg = 1;
3984 /* Shifted index. */
3985 if (skip_past_comma (&p))
3986 {
3987 /* [Xn,Rm, */
3988 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3989 /* Use the diagnostics set in parse_shift, so don't set a new
3990 error message here. */
3991 return false;
3992 }
3993 /* We only accept:
3994 [base,Xm] # For vector plus scalar SVE2 indexing.
3995 [base,Xm{,LSL #imm}]
3996 [base,Xm,SXTX {#imm}]
3997 [base,Wm,(S|U)XTW {#imm}] */
3998 if (operand->shifter.kind == AARCH64_MOD_NONE
3999 || operand->shifter.kind == AARCH64_MOD_LSL
4000 || operand->shifter.kind == AARCH64_MOD_SXTX)
4001 {
4002 if (*offset_qualifier == AARCH64_OPND_QLF_W)
4003 {
4004 set_syntax_error (_("invalid use of 32-bit register offset"));
4005 return false;
4006 }
4007 if (aarch64_get_qualifier_esize (*base_qualifier)
4008 != aarch64_get_qualifier_esize (*offset_qualifier)
4009 && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
4010 || *base_qualifier != AARCH64_OPND_QLF_S_S
4011 || *offset_qualifier != AARCH64_OPND_QLF_X))
4012 {
4013 set_syntax_error (_("offset has different size from base"));
4014 return false;
4015 }
4016 }
4017 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
4018 {
4019 set_syntax_error (_("invalid use of 64-bit register offset"));
4020 return false;
4021 }
4022 }
4023 else
4024 {
4025 /* [Xn,#:<reloc_op>:<symbol> */
4026 skip_past_char (&p, '#');
4027 if (skip_past_char (&p, ':'))
4028 {
4029 struct reloc_table_entry *entry;
4030
4031 /* Try to parse a relocation modifier. Anything else is
4032 an error. */
4033 if (!(entry = find_reloc_table_entry (&p)))
4034 {
4035 set_syntax_error (_("unknown relocation modifier"));
4036 return false;
4037 }
4038
4039 if (entry->ldst_type == 0)
4040 {
4041 set_syntax_error
4042 (_("this relocation modifier is not allowed on this "
4043 "instruction"));
4044 return false;
4045 }
4046
4047 /* [Xn,#:<reloc_op>: */
4048 /* We now have the group relocation table entry corresponding to
4049 the name in the assembler source. Next, we parse the
4050 expression. */
4051 if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4052 {
4053 set_syntax_error (_("invalid relocation expression"));
4054 return false;
4055 }
4056
4057 /* [Xn,#:<reloc_op>:<expr> */
4058 /* Record the load/store relocation type. */
4059 inst.reloc.type = entry->ldst_type;
4060 inst.reloc.pc_rel = entry->pc_rel;
4061 }
4062 else
4063 {
4064 if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
4065 {
4066 set_syntax_error (_("invalid expression in the address"));
4067 return false;
4068 }
4069 /* [Xn,<expr> */
4070 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
4071 /* [Xn,<expr>,<shifter> */
4072 if (! parse_shift (&p, operand, imm_shift_mode))
4073 return false;
4074 }
4075 }
4076 }
4077
4078 if (! skip_past_char (&p, ']'))
4079 {
4080 set_syntax_error (_("']' expected"));
4081 return false;
4082 }
4083
4084 if (skip_past_char (&p, '!'))
4085 {
4086 if (operand->addr.preind && operand->addr.offset.is_reg)
4087 {
4088 set_syntax_error (_("register offset not allowed in pre-indexed "
4089 "addressing mode"));
4090 return false;
4091 }
4092 /* [Xn]! */
4093 operand->addr.writeback = 1;
4094 }
4095 else if (skip_past_comma (&p))
4096 {
4097 /* [Xn], */
4098 operand->addr.postind = 1;
4099 operand->addr.writeback = 1;
4100
4101 if (operand->addr.preind)
4102 {
4103 set_syntax_error (_("cannot combine pre- and post-indexing"));
4104 return false;
4105 }
4106
4107 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
4108 if (reg)
4109 {
4110 /* [Xn],Xm */
4111 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
4112 {
4113 set_syntax_error (_("invalid offset register"));
4114 return false;
4115 }
4116
4117 operand->addr.offset.regno = reg->number;
4118 operand->addr.offset.is_reg = 1;
4119 }
4120 else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
4121 {
4122 /* [Xn],#expr */
4123 set_syntax_error (_("invalid expression in the address"));
4124 return false;
4125 }
4126 }
4127
4128 /* If at this point neither .preind nor .postind is set, we have a
4129 bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
4130 ldrab, accept [Rn] as a shorthand for [Rn,#0].
4131 For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
4132 [Zn.<T>, xzr]. */
4133 if (operand->addr.preind == 0 && operand->addr.postind == 0)
4134 {
4135 if (operand->addr.writeback)
4136 {
4137 if (operand->type == AARCH64_OPND_ADDR_SIMM10)
4138 {
4139 /* Accept [Rn]! as a shorthand for [Rn,#0]! */
4140 operand->addr.offset.is_reg = 0;
4141 operand->addr.offset.imm = 0;
4142 operand->addr.preind = 1;
4143 }
4144 else
4145 {
4146 /* Reject [Rn]! */
4147 set_syntax_error (_("missing offset in the pre-indexed address"));
4148 return false;
4149 }
4150 }
4151 else
4152 {
4153 operand->addr.preind = 1;
4154 if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
4155 {
4156 operand->addr.offset.is_reg = 1;
4157 operand->addr.offset.regno = REG_ZR;
4158 *offset_qualifier = AARCH64_OPND_QLF_X;
4159 }
4160 else
4161 {
4162 inst.reloc.exp.X_op = O_constant;
4163 inst.reloc.exp.X_add_number = 0;
4164 }
4165 }
4166 }
4167
4168 *str = p;
4169 return true;
4170 }
4171
4172 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4173 on success. */
4174 static bool
4175 parse_address (char **str, aarch64_opnd_info *operand)
4176 {
4177 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4178 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4179 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4180 }
4181
4182 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4183 The arguments have the same meaning as for parse_address_main.
4184 Return TRUE on success. */
4185 static bool
4186 parse_sve_address (char **str, aarch64_opnd_info *operand,
4187 aarch64_opnd_qualifier_t *base_qualifier,
4188 aarch64_opnd_qualifier_t *offset_qualifier)
4189 {
4190 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4191 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4192 SHIFTED_MUL_VL);
4193 }
4194
4195 /* Parse a register X0-X30. The register must be 64-bit and register 31
4196 is unallocated. */
4197 static bool
4198 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4199 {
4200 const reg_entry *reg = parse_reg (str);
4201 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4202 {
4203 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4204 return false;
4205 }
4206 operand->reg.regno = reg->number;
4207 operand->qualifier = AARCH64_OPND_QLF_X;
4208 return true;
4209 }
4210
4211 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4212 Return TRUE on success; otherwise return FALSE. */
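
/* For example (operand names purely illustrative): "movk x0, #:abs_g1_nc:foo"
   takes the relocation path below, setting inst.reloc.type from the table
   entry's movw_type, whereas a plain "movz x0, #0x1234" has no modifier and
   so sets *INTERNAL_FIXUP_P instead.  */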
4213 static bool
4214 parse_half (char **str, int *internal_fixup_p)
4215 {
4216 char *p = *str;
4217
4218 skip_past_char (&p, '#');
4219
4220 gas_assert (internal_fixup_p);
4221 *internal_fixup_p = 0;
4222
4223 if (*p == ':')
4224 {
4225 struct reloc_table_entry *entry;
4226
4227 /* Try to parse a relocation. Anything else is an error. */
4228 ++p;
4229
4230 if (!(entry = find_reloc_table_entry (&p)))
4231 {
4232 set_syntax_error (_("unknown relocation modifier"));
4233 return false;
4234 }
4235
4236 if (entry->movw_type == 0)
4237 {
4238 set_syntax_error
4239 (_("this relocation modifier is not allowed on this instruction"));
4240 return false;
4241 }
4242
4243 inst.reloc.type = entry->movw_type;
4244 }
4245 else
4246 *internal_fixup_p = 1;
4247
4248 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4249 return false;
4250
4251 *str = p;
4252 return true;
4253 }
4254
4255 /* Parse an operand for an ADRP instruction:
4256 ADRP <Xd>, <label>
4257 Return TRUE on success; otherwise return FALSE. */
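
/* Illustrative examples: "adrp x0, sym" falls through to the default
   BFD_RELOC_AARCH64_ADR_HI21_PCREL below, while a modified form such as
   "adrp x0, :got:sym" picks up the table entry's adrp_type instead
   (typically a GOT-page relocation for that particular modifier).  */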
4258
4259 static bool
4260 parse_adrp (char **str)
4261 {
4262 char *p;
4263
4264 p = *str;
4265 if (*p == ':')
4266 {
4267 struct reloc_table_entry *entry;
4268
4269 /* Try to parse a relocation. Anything else is an error. */
4270 ++p;
4271 if (!(entry = find_reloc_table_entry (&p)))
4272 {
4273 set_syntax_error (_("unknown relocation modifier"));
4274 return false;
4275 }
4276
4277 if (entry->adrp_type == 0)
4278 {
4279 set_syntax_error
4280 (_("this relocation modifier is not allowed on this instruction"));
4281 return false;
4282 }
4283
4284 inst.reloc.type = entry->adrp_type;
4285 }
4286 else
4287 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4288
4289 inst.reloc.pc_rel = 1;
4290 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4291 return false;
4292 *str = p;
4293 return true;
4294 }
4295
4296 /* Miscellaneous. */
4297
4298 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4299 of SIZE tokens in which index I gives the token for field value I,
4300 or is null if field value I is invalid. REG_TYPE says which register
4301 names should be treated as registers rather than as symbolic immediates.
4302
4303 Return true on success, moving *STR past the operand and storing the
4304 field value in *VAL. */
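
/* As a hypothetical illustration: with ARRAY = {"alpha", NULL, "gamma"} and
   SIZE = 3, the token "gamma" stores 2 in *VAL, and a constant expression
   evaluating to 1 also succeeds, since any constant below SIZE is accepted
   regardless of whether the corresponding slot is null.  */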
4305
4306 static int
4307 parse_enum_string (char **str, int64_t *val, const char *const *array,
4308 size_t size, aarch64_reg_type reg_type)
4309 {
4310 expressionS exp;
4311 char *p, *q;
4312 size_t i;
4313
4314 /* Match C-like tokens. */
4315 p = q = *str;
4316 while (ISALNUM (*q))
4317 q++;
4318
4319 for (i = 0; i < size; ++i)
4320 if (array[i]
4321 && strncasecmp (array[i], p, q - p) == 0
4322 && array[i][q - p] == 0)
4323 {
4324 *val = i;
4325 *str = q;
4326 return true;
4327 }
4328
4329 if (!parse_immediate_expression (&p, &exp, reg_type))
4330 return false;
4331
4332 if (exp.X_op == O_constant
4333 && (uint64_t) exp.X_add_number < size)
4334 {
4335 *val = exp.X_add_number;
4336 *str = p;
4337 return true;
4338 }
4339
4340 /* Use the default error for this operand. */
4341 return false;
4342 }
4343
4344 /* Parse an option for a preload instruction. Returns the encoding for the
4345 option, or PARSE_FAIL. */
4346
4347 static int
4348 parse_pldop (char **str)
4349 {
4350 char *p, *q;
4351 const struct aarch64_name_value_pair *o;
4352
4353 p = q = *str;
4354 while (ISALNUM (*q))
4355 q++;
4356
4357 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4358 if (!o)
4359 return PARSE_FAIL;
4360
4361 *str = q;
4362 return o->value;
4363 }
4364
4365 /* Parse an option for a barrier instruction. Returns the encoding for the
4366 option, or PARSE_FAIL. */
4367
4368 static int
4369 parse_barrier (char **str)
4370 {
4371 char *p, *q;
4372 const struct aarch64_name_value_pair *o;
4373
4374 p = q = *str;
4375 while (ISALPHA (*q))
4376 q++;
4377
4378 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4379 if (!o)
4380 return PARSE_FAIL;
4381
4382 *str = q;
4383 return o->value;
4384 }
4385
4386 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record and
4387 return 0 if successful. Otherwise return PARSE_FAIL. */
4388
4389 static int
4390 parse_barrier_psb (char **str,
4391 const struct aarch64_name_value_pair ** hint_opt)
4392 {
4393 char *p, *q;
4394 const struct aarch64_name_value_pair *o;
4395
4396 p = q = *str;
4397 while (ISALPHA (*q))
4398 q++;
4399
4400 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4401 if (!o)
4402 {
4403 set_fatal_syntax_error
4404 ( _("unknown or missing option to PSB/TSB"));
4405 return PARSE_FAIL;
4406 }
4407
4408 if (o->value != 0x11)
4409 {
4410 /* PSB only accepts option name 'CSYNC'. */
4411 set_syntax_error
4412 (_("the specified option is not accepted for PSB/TSB"));
4413 return PARSE_FAIL;
4414 }
4415
4416 *str = q;
4417 *hint_opt = o;
4418 return 0;
4419 }
4420
4421 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record and
4422 return 0 if successful. Otherwise return PARSE_FAIL. */
4423
4424 static int
4425 parse_bti_operand (char **str,
4426 const struct aarch64_name_value_pair ** hint_opt)
4427 {
4428 char *p, *q;
4429 const struct aarch64_name_value_pair *o;
4430
4431 p = q = *str;
4432 while (ISALPHA (*q))
4433 q++;
4434
4435 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4436 if (!o)
4437 {
4438 set_fatal_syntax_error
4439 ( _("unknown option to BTI"));
4440 return PARSE_FAIL;
4441 }
4442
4443 switch (o->value)
4444 {
4445 /* Valid BTI operands. */
4446 case HINT_OPD_C:
4447 case HINT_OPD_J:
4448 case HINT_OPD_JC:
4449 break;
4450
4451 default:
4452 set_syntax_error
4453 (_("unknown option to BTI"));
4454 return PARSE_FAIL;
4455 }
4456
4457 *str = q;
4458 *hint_opt = o;
4459 return 0;
4460 }
4461
4462 /* Parse STR for a register of REG_TYPE, followed by '.' and QUALIFIER.
4463 On success the function returns the REG_ENTRY struct and sets QUALIFIER
4464 to one of [bhsdq]; it returns NULL on failure. Format:
4465
4466 REG_TYPE.QUALIFIER
4467
4468 Side effect: Update STR with the current parse position on success.
4469
4470 FLAGS is as for parse_typed_reg. */
4471
4472 static const reg_entry *
4473 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4474 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4475 {
4476 struct vector_type_el vectype;
4477 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4478 PTR_FULL_REG | flags);
4479 if (!reg)
4480 return NULL;
4481
4482 if (vectype.type == NT_invtype)
4483 *qualifier = AARCH64_OPND_QLF_NIL;
4484 else
4485 {
4486 *qualifier = vectype_to_qualifier (&vectype);
4487 if (*qualifier == AARCH64_OPND_QLF_NIL)
4488 return NULL;
4489 }
4490
4491 return reg;
4492 }
4493
4494 /* Parse STR for an unsigned immediate (1-2 digits) in the format:
4495
4496 #<imm>
4497 <imm>
4498
4499 Function returns TRUE if an immediate was found, FALSE otherwise.
4500 */
4501 static bool
4502 parse_sme_immediate (char **str, int64_t *imm)
4503 {
4504 int64_t val;
4505 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4506 return false;
4507
4508 *imm = val;
4509 return true;
4510 }
4511
4512 /* Parse index with selection register and immediate offset:
4513
4514 [<Wv>, <imm>]
4515 [<Wv>, #<imm>]
4516
4517 Return true on success, populating OPND with the parsed index. */
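
/* For example (register and immediate values chosen for illustration), an
   operand written "[w12, #3]" stores index.regno = 12 and index.imm = 3.  */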
4518
4519 static bool
4520 parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
4521 {
4522 const reg_entry *reg;
4523
4524 if (!skip_past_char (str, '['))
4525 {
4526 set_syntax_error (_("expected '['"));
4527 return false;
4528 }
4529
4530 /* The selection register, encoded in the 2-bit Rv field. */
4531 reg = parse_reg (str);
4532 if (reg == NULL || reg->type != REG_TYPE_R_32)
4533 {
4534 set_syntax_error (_("expected a 32-bit selection register"));
4535 return false;
4536 }
4537 opnd->index.regno = reg->number;
4538
4539 if (!skip_past_char (str, ','))
4540 {
4541 set_syntax_error (_("missing immediate offset"));
4542 return false;
4543 }
4544
4545 if (!parse_sme_immediate (str, &opnd->index.imm))
4546 {
4547 set_syntax_error (_("expected a constant immediate offset"));
4548 return false;
4549 }
4550
4551 if (!skip_past_char (str, ']'))
4552 {
4553 set_syntax_error (_("expected ']'"));
4554 return false;
4555 }
4556
4557 return true;
4558 }
4559
4560 /* Parse a register of type REG_TYPE that might have an element type
4561 qualifier and that is indexed by two values: a 32-bit register,
4562 followed by an immediate. The ranges of the register and the
4563 immediate vary by opcode and are checked in libopcodes.
4564
4565 Return true on success, populating OPND with information about
4566 the operand and setting QUALIFIER to the register qualifier.
4567
4568 Field format examples:
4569
4570 <Pm>.<T>[<Wv>, #<imm>]
4571 ZA[<Wv>, #<imm>]
4572 <ZAn><HV>.<T>[<Wv>, #<imm>]
4573
4574 FLAGS is as for parse_typed_reg. */
4575
4576 static bool
4577 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4578 struct aarch64_indexed_za *opnd,
4579 aarch64_opnd_qualifier_t *qualifier,
4580 unsigned int flags)
4581 {
4582 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4583 if (!reg)
4584 return false;
4585
4586 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4587 opnd->regno = reg->number;
4588
4589 return parse_sme_za_index (str, opnd);
4590 }
4591
4592 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4593 operand. */
4594
4595 static bool
4596 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4597 struct aarch64_indexed_za *opnd,
4598 aarch64_opnd_qualifier_t *qualifier)
4599 {
4600 if (!skip_past_char (str, '{'))
4601 {
4602 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4603 return false;
4604 }
4605
4606 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4607 PTR_IN_REGLIST))
4608 return false;
4609
4610 if (!skip_past_char (str, '}'))
4611 {
4612 set_syntax_error (_("expected '}'"));
4613 return false;
4614 }
4615
4616 return true;
4617 }
4618
4619 /* Parse a list of up to eight 64-bit element tile names separated by commas in
4620 SME's ZERO instruction:
4621
4622 ZERO { <mask> }
4623
4624 Function returns <mask>:
4625
4626 an 8-bit mask of the 64-bit element tiles named ZA0.D to ZA7.D.
4627 */
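/* For instance, under the encoding below "zero { za0.s, za1.s }" yields the
   mask 0x11 | 0x22 == 0x33, while "zero { za }" or "zero { za0.b }" yields
   0xff; these particular operand strings are illustrative only.  */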
4628 static int
4629 parse_sme_zero_mask(char **str)
4630 {
4631 char *q;
4632 int mask;
4633 aarch64_opnd_qualifier_t qualifier;
4634 unsigned int ptr_flags = PTR_IN_REGLIST;
4635
4636 mask = 0x00;
4637 q = *str;
4638 do
4639 {
4640 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4641 &qualifier, ptr_flags);
4642 if (!reg)
4643 return PARSE_FAIL;
4644
4645 if (reg->type == REG_TYPE_ZA)
4646 {
4647 if (qualifier != AARCH64_OPND_QLF_NIL)
4648 {
4649 set_syntax_error (_("ZA should not have a size suffix"));
4650 return PARSE_FAIL;
4651 }
4652 /* { ZA } is assembled as all-ones immediate. */
4653 mask = 0xff;
4654 }
4655 else
4656 {
4657 int regno = reg->number;
4658 if (qualifier == AARCH64_OPND_QLF_S_B)
4659 {
4660 /* { ZA0.B } is assembled as all-ones immediate. */
4661 mask = 0xff;
4662 }
4663 else if (qualifier == AARCH64_OPND_QLF_S_H)
4664 mask |= 0x55 << regno;
4665 else if (qualifier == AARCH64_OPND_QLF_S_S)
4666 mask |= 0x11 << regno;
4667 else if (qualifier == AARCH64_OPND_QLF_S_D)
4668 mask |= 0x01 << regno;
4669 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4670 {
4671 set_syntax_error (_("ZA tile masks do not operate at .Q"
4672 " granularity"));
4673 return PARSE_FAIL;
4674 }
4675 else if (qualifier == AARCH64_OPND_QLF_NIL)
4676 {
4677 set_syntax_error (_("missing ZA tile size"));
4678 return PARSE_FAIL;
4679 }
4680 else
4681 {
4682 set_syntax_error (_("invalid ZA tile"));
4683 return PARSE_FAIL;
4684 }
4685 }
4686 ptr_flags |= PTR_GOOD_MATCH;
4687 }
4688 while (skip_past_char (&q, ','));
4689
4690 *str = q;
4691 return mask;
4692 }
4693
4694 /* Parse the <mask> operand of SME's ZERO instruction, which is wrapped in curly braces:
4695
4696 ZERO { <mask> }
4697
4698 Function returns the value of the <mask> bit-field.
4699 */
4700 static int
4701 parse_sme_list_of_64bit_tiles (char **str)
4702 {
4703 int regno;
4704
4705 if (!skip_past_char (str, '{'))
4706 {
4707 set_syntax_error (_("expected '{'"));
4708 return PARSE_FAIL;
4709 }
4710
4711 /* Empty <mask> list is an all-zeros immediate. */
4712 if (!skip_past_char (str, '}'))
4713 {
4714 regno = parse_sme_zero_mask (str);
4715 if (regno == PARSE_FAIL)
4716 return PARSE_FAIL;
4717
4718 if (!skip_past_char (str, '}'))
4719 {
4720 set_syntax_error (_("expected '}'"));
4721 return PARSE_FAIL;
4722 }
4723 }
4724 else
4725 regno = 0x00;
4726
4727 return regno;
4728 }
4729
4730 /* Parse streaming mode operand for SMSTART and SMSTOP.
4731
4732 {SM | ZA}
4733
4734 Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
4735 */
4736 static int
4737 parse_sme_sm_za (char **str)
4738 {
4739 char *p, *q;
4740
4741 p = q = *str;
4742 while (ISALPHA (*q))
4743 q++;
4744
4745 if ((q - p != 2)
4746 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4747 {
4748 set_syntax_error (_("expected SM or ZA operand"));
4749 return PARSE_FAIL;
4750 }
4751
4752 *str = q;
4753 return TOLOWER (p[0]);
4754 }
4755
4756 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4757 Returns the encoding for the option, or PARSE_FAIL.
4758
4759 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4760 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4761
4762 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4763 field, otherwise as a system register.
4764 */
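/* As a worked example of the implementation-defined form handled below, a
   name such as "s3_0_c13_c0_2" parses as op0=3, op1=0, Cn=13, Cm=0, op2=2 and
   encodes as (3 << 14) | (0 << 11) | (13 << 7) | (0 << 3) | 2 == 0xc682;
   that register name is chosen purely for illustration.  */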
4765
4766 static int
4767 parse_sys_reg (char **str, htab_t sys_regs,
4768 int imple_defined_p, int pstatefield_p,
4769 uint32_t* flags)
4770 {
4771 char *p, *q;
4772 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4773 const aarch64_sys_reg *o;
4774 int value;
4775
4776 p = buf;
4777 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4778 if (p < buf + (sizeof (buf) - 1))
4779 *p++ = TOLOWER (*q);
4780 *p = '\0';
4781
4782 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4783 valid system register. This is enforced by construction of the hash
4784 table. */
4785 if (p - buf != q - *str)
4786 return PARSE_FAIL;
4787
4788 o = str_hash_find (sys_regs, buf);
4789 if (!o)
4790 {
4791 if (!imple_defined_p)
4792 return PARSE_FAIL;
4793 else
4794 {
4795 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4796 unsigned int op0, op1, cn, cm, op2;
4797
4798 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4799 != 5)
4800 return PARSE_FAIL;
4801 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4802 return PARSE_FAIL;
4803 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4804 if (flags)
4805 *flags = 0;
4806 }
4807 }
4808 else
4809 {
4810 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4811 as_bad (_("selected processor does not support PSTATE field "
4812 "name '%s'"), buf);
4813 if (!pstatefield_p
4814 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4815 o->value, o->flags, o->features))
4816 as_bad (_("selected processor does not support system register "
4817 "name '%s'"), buf);
4818 if (aarch64_sys_reg_deprecated_p (o->flags))
4819 as_warn (_("system register name '%s' is deprecated and may be "
4820 "removed in a future release"), buf);
4821 value = o->value;
4822 if (flags)
4823 *flags = o->flags;
4824 }
4825
4826 *str = q;
4827 return value;
4828 }
4829
4830 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4831 for the option, or NULL. */
4832
4833 static const aarch64_sys_ins_reg *
4834 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4835 {
4836 char *p, *q;
4837 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4838 const aarch64_sys_ins_reg *o;
4839
4840 p = buf;
4841 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4842 if (p < buf + (sizeof (buf) - 1))
4843 *p++ = TOLOWER (*q);
4844 *p = '\0';
4845
4846 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4847 valid system register. This is enforced by construction of the hash
4848 table. */
4849 if (p - buf != q - *str)
4850 return NULL;
4851
4852 o = str_hash_find (sys_ins_regs, buf);
4853 if (!o)
4854 return NULL;
4855
4856 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4857 o->name, o->value, o->flags, 0))
4858 as_bad (_("selected processor does not support system register "
4859 "name '%s'"), buf);
4860 if (aarch64_sys_reg_deprecated_p (o->flags))
4861 as_warn (_("system register name '%s' is deprecated and may be "
4862 "removed in a future release"), buf);
4863
4864 *str = q;
4865 return o;
4866 }
4867 \f
4868 #define po_char_or_fail(chr) do { \
4869 if (! skip_past_char (&str, chr)) \
4870 goto failure; \
4871 } while (0)
4872
4873 #define po_reg_or_fail(regtype) do { \
4874 reg = aarch64_reg_parse (&str, regtype, NULL); \
4875 if (!reg) \
4876 goto failure; \
4877 } while (0)
4878
4879 #define po_int_fp_reg_or_fail(reg_type) do { \
4880 reg = parse_reg (&str); \
4881 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4882 { \
4883 set_expected_reg_error (reg_type, reg, 0); \
4884 goto failure; \
4885 } \
4886 info->reg.regno = reg->number; \
4887 info->qualifier = inherent_reg_qualifier (reg); \
4888 } while (0)
4889
4890 #define po_imm_nc_or_fail() do { \
4891 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4892 goto failure; \
4893 } while (0)
4894
4895 #define po_imm_or_fail(min, max) do { \
4896 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4897 goto failure; \
4898 if (val < min || val > max) \
4899 { \
4900 set_fatal_syntax_error (_("immediate value out of range "\
4901 #min " to "#max)); \
4902 goto failure; \
4903 } \
4904 } while (0)
4905
4906 #define po_enum_or_fail(array) do { \
4907 if (!parse_enum_string (&str, &val, array, \
4908 ARRAY_SIZE (array), imm_reg_type)) \
4909 goto failure; \
4910 } while (0)
4911
4912 #define po_misc_or_fail(expr) do { \
4913 if (!expr) \
4914 goto failure; \
4915 } while (0)
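
/* These po_* helpers are only meaningful inside a parsing routine that
   declares the locals they reference (str, reg, val, info, imm_reg_type)
   and provides a `failure' label, e.g. (a sketch, not actual source):

     po_char_or_fail (',');       // consume a mandatory comma
     po_imm_or_fail (0, 63);      // parse an immediate and range-check it

   where control transfers to `failure' on any parse error.  */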
4916 \f
4917 /* Encode the 12-bit imm field of Add/sub immediate. */
4918 static inline uint32_t
4919 encode_addsub_imm (uint32_t imm)
4920 {
4921 return imm << 10;
4922 }
4923
4924 /* Encode the shift amount field of Add/sub immediate. */
4925 static inline uint32_t
4926 encode_addsub_imm_shift_amount (uint32_t cnt)
4927 {
4928 return cnt << 22;
4929 }
4930
4931
4932 /* Encode the imm field of the Adr instruction. */
4933 static inline uint32_t
4934 encode_adr_imm (uint32_t imm)
4935 {
4936 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4937 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4938 }
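
/* For example (illustrative value only): encode_adr_imm (0x5) places
   immlo = 0b01 at bits [30:29] and immhi = 0b1 at bit 5, producing the
   mask 0x20000020 that would then be OR-ed into the ADR opcode.  */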
4939
4940 /* Encode the immediate field of Move wide immediate. */
4941 static inline uint32_t
4942 encode_movw_imm (uint32_t imm)
4943 {
4944 return imm << 5;
4945 }
4946
4947 /* Encode the 26-bit offset of unconditional branch. */
4948 static inline uint32_t
4949 encode_branch_ofs_26 (uint32_t ofs)
4950 {
4951 return ofs & ((1 << 26) - 1);
4952 }
4953
4954 /* Encode the 19-bit offset of conditional branch and compare & branch. */
4955 static inline uint32_t
4956 encode_cond_branch_ofs_19 (uint32_t ofs)
4957 {
4958 return (ofs & ((1 << 19) - 1)) << 5;
4959 }
4960
4961 /* Encode the 19-bit offset of ld literal. */
4962 static inline uint32_t
4963 encode_ld_lit_ofs_19 (uint32_t ofs)
4964 {
4965 return (ofs & ((1 << 19) - 1)) << 5;
4966 }
4967
4968 /* Encode the 14-bit offset of test & branch. */
4969 static inline uint32_t
4970 encode_tst_branch_ofs_14 (uint32_t ofs)
4971 {
4972 return (ofs & ((1 << 14) - 1)) << 5;
4973 }
4974
4975 /* Encode the 16-bit imm field of svc/hvc/smc. */
4976 static inline uint32_t
4977 encode_svc_imm (uint32_t imm)
4978 {
4979 return imm << 5;
4980 }
4981
4982 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4983 static inline uint32_t
4984 reencode_addsub_switch_add_sub (uint32_t opcode)
4985 {
4986 return opcode ^ (1 << 30);
4987 }
4988
4989 static inline uint32_t
4990 reencode_movzn_to_movz (uint32_t opcode)
4991 {
4992 return opcode | (1 << 30);
4993 }
4994
4995 static inline uint32_t
4996 reencode_movzn_to_movn (uint32_t opcode)
4997 {
4998 return opcode & ~(1 << 30);
4999 }
5000
5001 /* Overall per-instruction processing. */
5002
5003 /* We need to be able to fix up arbitrary expressions in some statements.
5004 This is so that we can handle symbols that are an arbitrary distance from
5005 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5006 which returns part of an address in a form which will be valid for
5007 a data instruction. We do this by pushing the expression into a symbol
5008 in the expr_section, and creating a fix for that. */
5009
5010 static fixS *
5011 fix_new_aarch64 (fragS * frag,
5012 int where,
5013 short int size,
5014 expressionS * exp,
5015 int pc_rel,
5016 int reloc)
5017 {
5018 fixS *new_fix;
5019
5020 switch (exp->X_op)
5021 {
5022 case O_constant:
5023 case O_symbol:
5024 case O_add:
5025 case O_subtract:
5026 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5027 break;
5028
5029 default:
5030 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5031 pc_rel, reloc);
5032 break;
5033 }
5034 return new_fix;
5035 }
5036 \f
5037 /* Diagnostics on operands errors. */
5038
5039 /* By default, output a verbose error message.
5040 Verbose error messages can be disabled with -mno-verbose-error. */
5041 static int verbose_error_p = 1;
5042
5043 #ifdef DEBUG_AARCH64
5044 /* N.B. this is only for the purpose of debugging. */
5045 const char* operand_mismatch_kind_names[] =
5046 {
5047 "AARCH64_OPDE_NIL",
5048 "AARCH64_OPDE_RECOVERABLE",
5049 "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
5050 "AARCH64_OPDE_EXPECTED_A_AFTER_B",
5051 "AARCH64_OPDE_SYNTAX_ERROR",
5052 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
5053 "AARCH64_OPDE_INVALID_VARIANT",
5054 "AARCH64_OPDE_OUT_OF_RANGE",
5055 "AARCH64_OPDE_UNALIGNED",
5056 "AARCH64_OPDE_REG_LIST",
5057 "AARCH64_OPDE_OTHER_ERROR",
5058 };
5059 #endif /* DEBUG_AARCH64 */
5060
5061 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
5062
5063 When multiple errors of different kinds are found in the same assembly
5064 line, only the error of the highest severity will be picked up for
5065 issuing the diagnostics. */
5066
5067 static inline bool
5068 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
5069 enum aarch64_operand_error_kind rhs)
5070 {
5071 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
5072 gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
5073 gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
5074 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
5075 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
5076 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
5077 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
5078 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
5079 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
5080 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
5081 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
5082 return lhs > rhs;
5083 }
5084
5085 /* Helper routine to get the mnemonic name from the assembly instruction
5086 line; should only be called for diagnostic purposes, as there is a
5087 string copy operation involved, which may affect runtime
5088 performance if used elsewhere. */
5089
5090 static const char*
5091 get_mnemonic_name (const char *str)
5092 {
5093 static char mnemonic[32];
5094 char *ptr;
5095
5096 /* Get the first 31 bytes and assume that the full name is included. */
5097 strncpy (mnemonic, str, 31);
5098 mnemonic[31] = '\0';
5099
5100 /* Scan up to the end of the mnemonic, which must end in white space,
5101 '.', or end of string. */
5102 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
5103 ;
5104
5105 *ptr = '\0';
5106
5107 /* Append '...' to the truncated long name. */
5108 if (ptr - mnemonic == 31)
5109 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
5110
5111 return mnemonic;
5112 }
5113
5114 static void
5115 reset_aarch64_instruction (aarch64_instruction *instruction)
5116 {
5117 memset (instruction, '\0', sizeof (aarch64_instruction));
5118 instruction->reloc.type = BFD_RELOC_UNUSED;
5119 }
5120
5121 /* Data structures storing one user error in the assembly code related to
5122 operands. */
5123
5124 struct operand_error_record
5125 {
5126 const aarch64_opcode *opcode;
5127 aarch64_operand_error detail;
5128 struct operand_error_record *next;
5129 };
5130
5131 typedef struct operand_error_record operand_error_record;
5132
5133 struct operand_errors
5134 {
5135 operand_error_record *head;
5136 operand_error_record *tail;
5137 };
5138
5139 typedef struct operand_errors operand_errors;
5140
5141 /* Top-level data structure reporting user errors for the current line of
5142 the assembly code.
5143 The way md_assemble works is that all opcodes sharing the same mnemonic
5144 name are iterated to find a match to the assembly line. In this data
5145 structure, each such opcode will have one operand_error_record
5146 allocated and inserted. In other words, excessive errors related to
5147 a single opcode are disregarded. */
5148 operand_errors operand_error_report;
5149
5150 /* Free record nodes. */
5151 static operand_error_record *free_opnd_error_record_nodes = NULL;
5152
5153 /* Initialize the data structure that stores the operand mismatch
5154 information on assembling one line of the assembly code. */
5155 static void
5156 init_operand_error_report (void)
5157 {
5158 if (operand_error_report.head != NULL)
5159 {
5160 gas_assert (operand_error_report.tail != NULL);
5161 operand_error_report.tail->next = free_opnd_error_record_nodes;
5162 free_opnd_error_record_nodes = operand_error_report.head;
5163 operand_error_report.head = NULL;
5164 operand_error_report.tail = NULL;
5165 return;
5166 }
5167 gas_assert (operand_error_report.tail == NULL);
5168 }
5169
5170 /* Return TRUE if some operand error has been recorded during the
5171 parsing of the current assembly line using the opcode *OPCODE;
5172 otherwise return FALSE. */
5173 static inline bool
5174 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5175 {
5176 operand_error_record *record = operand_error_report.head;
5177 return record && record->opcode == opcode;
5178 }
5179
5180 /* Add the error record *NEW_RECORD to operand_error_report. The record's
5181 OPCODE field is initialized with OPCODE.
5182 N.B. only one record for each opcode, i.e. at most one error is
5183 recorded for each instruction template. */
5184
5185 static void
5186 add_operand_error_record (const operand_error_record* new_record)
5187 {
5188 const aarch64_opcode *opcode = new_record->opcode;
5189 operand_error_record* record = operand_error_report.head;
5190
5191 /* The record may have been created for this opcode. If not, we need
5192 to prepare one. */
5193 if (! opcode_has_operand_error_p (opcode))
5194 {
5195 /* Get one empty record. */
5196 if (free_opnd_error_record_nodes == NULL)
5197 {
5198 record = XNEW (operand_error_record);
5199 }
5200 else
5201 {
5202 record = free_opnd_error_record_nodes;
5203 free_opnd_error_record_nodes = record->next;
5204 }
5205 record->opcode = opcode;
5206 /* Insert at the head. */
5207 record->next = operand_error_report.head;
5208 operand_error_report.head = record;
5209 if (operand_error_report.tail == NULL)
5210 operand_error_report.tail = record;
5211 }
5212 else if (record->detail.kind != AARCH64_OPDE_NIL
5213 && record->detail.index <= new_record->detail.index
5214 && operand_error_higher_severity_p (record->detail.kind,
5215 new_record->detail.kind))
5216 {
5217 /* In the case of multiple errors found on operands related to a
5218 single opcode, only record the error of the leftmost operand and
5219 only if the error is of higher severity. */
5220 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
5221 " the existing error %s on operand %d",
5222 operand_mismatch_kind_names[new_record->detail.kind],
5223 new_record->detail.index,
5224 operand_mismatch_kind_names[record->detail.kind],
5225 record->detail.index);
5226 return;
5227 }
5228
5229 record->detail = new_record->detail;
5230 }
5231
5232 static inline void
5233 record_operand_error_info (const aarch64_opcode *opcode,
5234 aarch64_operand_error *error_info)
5235 {
5236 operand_error_record record;
5237 record.opcode = opcode;
5238 record.detail = *error_info;
5239 add_operand_error_record (&record);
5240 }
5241
5242 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5243 error message *ERROR, for operand IDX (count from 0). */
5244
5245 static void
5246 record_operand_error (const aarch64_opcode *opcode, int idx,
5247 enum aarch64_operand_error_kind kind,
5248 const char* error)
5249 {
5250 aarch64_operand_error info;
5251 memset(&info, 0, sizeof (info));
5252 info.index = idx;
5253 info.kind = kind;
5254 info.error = error;
5255 info.non_fatal = false;
5256 record_operand_error_info (opcode, &info);
5257 }
5258
5259 static void
5260 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5261 enum aarch64_operand_error_kind kind,
5262 const char* error, const int *extra_data)
5263 {
5264 aarch64_operand_error info;
5265 info.index = idx;
5266 info.kind = kind;
5267 info.error = error;
5268 info.data[0].i = extra_data[0];
5269 info.data[1].i = extra_data[1];
5270 info.data[2].i = extra_data[2];
5271 info.non_fatal = false;
5272 record_operand_error_info (opcode, &info);
5273 }
5274
5275 static void
5276 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5277 const char* error, int lower_bound,
5278 int upper_bound)
5279 {
5280 int data[3] = {lower_bound, upper_bound, 0};
5281 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5282 error, data);
5283 }
5284
5285 /* Remove the operand error record for *OPCODE. */
5286 static void ATTRIBUTE_UNUSED
5287 remove_operand_error_record (const aarch64_opcode *opcode)
5288 {
5289 if (opcode_has_operand_error_p (opcode))
5290 {
5291 operand_error_record* record = operand_error_report.head;
5292 gas_assert (record != NULL && operand_error_report.tail != NULL);
5293 operand_error_report.head = record->next;
5294 record->next = free_opnd_error_record_nodes;
5295 free_opnd_error_record_nodes = record;
5296 if (operand_error_report.head == NULL)
5297 {
5298 gas_assert (operand_error_report.tail == record);
5299 operand_error_report.tail = NULL;
5300 }
5301 }
5302 }
5303
5304 /* Given the instruction in *INSTR, return the index of the best matched
5305 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5306
5307 Return -1 if there is no qualifier sequence; return the first match
5308 if multiple matches are found.
5309
5310 static int
5311 find_best_match (const aarch64_inst *instr,
5312 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5313 {
5314 int i, num_opnds, max_num_matched, idx;
5315
5316 num_opnds = aarch64_num_of_operands (instr->opcode);
5317 if (num_opnds == 0)
5318 {
5319 DEBUG_TRACE ("no operand");
5320 return -1;
5321 }
5322
5323 max_num_matched = 0;
5324 idx = 0;
5325
5326 /* For each pattern. */
5327 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5328 {
5329 int j, num_matched;
5330 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5331
5332 /* Most opcodes have far fewer patterns in the list. */
5333 if (empty_qualifier_sequence_p (qualifiers))
5334 {
5335 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5336 break;
5337 }
5338
5339 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5340 if (*qualifiers == instr->operands[j].qualifier)
5341 ++num_matched;
5342
5343 if (num_matched > max_num_matched)
5344 {
5345 max_num_matched = num_matched;
5346 idx = i;
5347 }
5348 }
5349
5350 DEBUG_TRACE ("return with %d", idx);
5351 return idx;
5352 }
5353
5354 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5355 corresponding operands in *INSTR. */
5356
5357 static inline void
5358 assign_qualifier_sequence (aarch64_inst *instr,
5359 const aarch64_opnd_qualifier_t *qualifiers)
5360 {
5361 int i = 0;
5362 int num_opnds = aarch64_num_of_operands (instr->opcode);
5363 gas_assert (num_opnds);
5364 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5365 instr->operands[i].qualifier = *qualifiers;
5366 }
5367
5368 /* Callback used by aarch64_print_operand to apply STYLE to the
5369 disassembler output created from FMT and ARGS. The STYLER object holds
5370 any required state. Must return a pointer to a string (created from FMT
5371 and ARGS) that will continue to be valid until the complete disassembled
5372 instruction has been printed.
5373
5374 We don't currently add any styling to the output of the disassembler as
5375 used within assembler error messages, and so STYLE is ignored here. A
5376 new string is allocated on the obstack held within STYLER and returned
5377 to the caller. */
5378
5379 static const char *aarch64_apply_style
5380 (struct aarch64_styler *styler,
5381 enum disassembler_style style ATTRIBUTE_UNUSED,
5382 const char *fmt, va_list args)
5383 {
5384 int res;
5385 char *ptr;
5386 struct obstack *stack = (struct obstack *) styler->state;
5387 va_list ap;
5388
5389 /* Calculate the required space. */
5390 va_copy (ap, args);
5391 res = vsnprintf (NULL, 0, fmt, ap);
5392 va_end (ap);
5393 gas_assert (res >= 0);
5394
5395 /* Allocate space on the obstack and format the result. */
5396 ptr = (char *) obstack_alloc (stack, res + 1);
5397 res = vsnprintf (ptr, (res + 1), fmt, args);
5398 gas_assert (res >= 0);
5399
5400 return ptr;
5401 }
5402
5403 /* Print operands for diagnostic purposes. */
5404
5405 static void
5406 print_operands (char *buf, const aarch64_opcode *opcode,
5407 const aarch64_opnd_info *opnds)
5408 {
5409 int i;
5410 struct aarch64_styler styler;
5411 struct obstack content;
5412 obstack_init (&content);
5413
5414 styler.apply_style = aarch64_apply_style;
5415 styler.state = (void *) &content;
5416
5417 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5418 {
5419 char str[128];
5420 char cmt[128];
5421
5422 /* We rely mainly on the opcode operand info, but we also look into
5423 inst->operands to support the disassembling of the optional
5424 operand.
5425 The two operand codes should be the same in all cases, apart from
5426 when the operand can be optional. */
5427 if (opcode->operands[i] == AARCH64_OPND_NIL
5428 || opnds[i].type == AARCH64_OPND_NIL)
5429 break;
5430
5431 /* Generate the operand string in STR. */
5432 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
5433 NULL, cmt, sizeof (cmt), cpu_variant, &styler);
5434
5435 /* Delimiter. */
5436 if (str[0] != '\0')
5437 strcat (buf, i == 0 ? " " : ", ");
5438
5439 /* Append the operand string. */
5440 strcat (buf, str);
5441
5442 /* Append a comment. This works because only the last operand ever
5443 adds a comment. If that ever changes then we'll need to be
5444 smarter here. */
5445 if (cmt[0] != '\0')
5446 {
5447 strcat (buf, "\t// ");
5448 strcat (buf, cmt);
5449 }
5450 }
5451
5452 obstack_free (&content, NULL);
5453 }
5454
5455 /* Send a string to stderr as information. */
5456
5457 static void
5458 output_info (const char *format, ...)
5459 {
5460 const char *file;
5461 unsigned int line;
5462 va_list args;
5463
5464 file = as_where (&line);
5465 if (file)
5466 {
5467 if (line != 0)
5468 fprintf (stderr, "%s:%u: ", file, line);
5469 else
5470 fprintf (stderr, "%s: ", file);
5471 }
5472 fprintf (stderr, _("Info: "));
5473 va_start (args, format);
5474 vfprintf (stderr, format, args);
5475 va_end (args);
5476 (void) putc ('\n', stderr);
5477 }
5478
5479 /* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
5480 relates to registers or register lists. If so, return a string that
5481 reports the error against "operand %d", otherwise return null. */
5482
5483 static const char *
5484 get_reg_error_message (const aarch64_operand_error *detail)
5485 {
5486 /* Handle the case where we found a register that was expected
5487 to be in a register list outside of a register list. */
5488 if ((detail->data[1].i & detail->data[2].i) != 0
5489 && (detail->data[1].i & SEF_IN_REGLIST) == 0)
5490 return _("missing braces at operand %d");
5491
5492 /* If some opcodes expected a register, and we found a register,
5493 complain about the difference. */
5494 if (detail->data[2].i)
5495 {
5496 unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
5497 ? detail->data[1].i & ~SEF_IN_REGLIST
5498 : detail->data[0].i & ~SEF_DEFAULT_ERROR);
5499 const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
5500 if (!msg)
5501 msg = N_("unexpected register type at operand %d");
5502 return msg;
5503 }
5504
5505 /* Handle the case where we got to the point of trying to parse a
5506 register within a register list, but didn't find a known register. */
5507 if (detail->data[1].i & SEF_IN_REGLIST)
5508 {
5509 unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
5510 const char *msg = get_reg_expected_msg (expected, 0);
5511 if (!msg)
5512 msg = _("invalid register list at operand %d");
5513 return msg;
5514 }
5515
5516 /* Punt if register-related problems weren't the only errors. */
5517 if (detail->data[0].i & SEF_DEFAULT_ERROR)
5518 return NULL;
5519
5520 /* Handle the case where the only acceptable things are registers. */
5521 if (detail->data[1].i == 0)
5522 {
5523 const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
5524 if (!msg)
5525 msg = _("expected a register at operand %d");
5526 return msg;
5527 }
5528
5529 /* Handle the case where the only acceptable things are register lists,
5530 and there was no opening '{'. */
5531 if (detail->data[0].i == 0)
5532 return _("expected '{' at operand %d");
5533
5534 return _("expected a register or register list at operand %d");
5535 }
5536
5537 /* Output one operand error record. */
5538
5539 static void
5540 output_operand_error_record (const operand_error_record *record, char *str)
5541 {
5542 const aarch64_operand_error *detail = &record->detail;
5543 int idx = detail->index;
5544 const aarch64_opcode *opcode = record->opcode;
5545 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5546 : AARCH64_OPND_NIL);
5547
5548 typedef void (*handler_t)(const char *format, ...);
5549 handler_t handler = detail->non_fatal ? as_warn : as_bad;
5550 const char *msg = detail->error;
5551
5552 switch (detail->kind)
5553 {
5554 case AARCH64_OPDE_NIL:
5555 gas_assert (0);
5556 break;
5557
5558 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
5559 handler (_("this `%s' should have an immediately preceding `%s'"
5560 " -- `%s'"),
5561 detail->data[0].s, detail->data[1].s, str);
5562 break;
5563
5564 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
5565 handler (_("the preceding `%s' should be followed by `%s` rather"
5566 " than `%s` -- `%s'"),
5567 detail->data[1].s, detail->data[0].s, opcode->name, str);
5568 break;
5569
5570 case AARCH64_OPDE_SYNTAX_ERROR:
5571 if (!msg && idx >= 0)
5572 {
5573 msg = get_reg_error_message (detail);
5574 if (msg)
5575 {
5576 char *full_msg = xasprintf (msg, idx + 1);
5577 handler (_("%s -- `%s'"), full_msg, str);
5578 free (full_msg);
5579 break;
5580 }
5581 }
5582 /* Fall through. */
5583
5584 case AARCH64_OPDE_RECOVERABLE:
5585 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5586 case AARCH64_OPDE_OTHER_ERROR:
5587 /* Use the prepared error message if there is one, otherwise use the
5588 operand description string to describe the error. */
5589 if (msg != NULL)
5590 {
5591 if (idx < 0)
5592 handler (_("%s -- `%s'"), msg, str);
5593 else
5594 handler (_("%s at operand %d -- `%s'"),
5595 msg, idx + 1, str);
5596 }
5597 else
5598 {
5599 gas_assert (idx >= 0);
5600 handler (_("operand %d must be %s -- `%s'"), idx + 1,
5601 aarch64_get_operand_desc (opd_code), str);
5602 }
5603 break;
5604
5605 case AARCH64_OPDE_INVALID_VARIANT:
5606 handler (_("operand mismatch -- `%s'"), str);
5607 if (verbose_error_p)
5608 {
5609 /* We will try to correct the erroneous instruction and also provide
5610 more information e.g. all other valid variants.
5611
5612 The string representation of the corrected instruction and other
5613 valid variants are generated by
5614
5615 1) obtaining the intermediate representation of the erroneous
5616 instruction;
5617 2) manipulating the IR, e.g. replacing the operand qualifier;
5618 3) printing out the instruction by calling the printer functions
5619 shared with the disassembler.
5620
5621 The limitation of this method is that the exact input assembly
5622 line cannot be accurately reproduced in some cases, for example an
5623 optional operand present in the actual assembly line will be
5624 omitted in the output; likewise for the optional syntax rules,
5625 e.g. the # before the immediate. Another limitation is that the
5626 assembly symbols and relocation operations in the assembly line
5627 currently cannot be printed out in the error report. Last but not
5628 least, when other errors co-exist with this error, the
5629 'corrected' instruction may still be incorrect, e.g. given
5630 'ldnp h0,h1,[x0,#6]!'
5631 this diagnosis will provide the version:
5632 'ldnp s0,s1,[x0,#6]!'
5633 which is still not right. */
5634 size_t len = strlen (get_mnemonic_name (str));
5635 int i, qlf_idx;
5636 bool result;
5637 char buf[2048];
5638 aarch64_inst *inst_base = &inst.base;
5639 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5640
5641 /* Init inst. */
5642 reset_aarch64_instruction (&inst);
5643 inst_base->opcode = opcode;
5644
5645 /* Reset the error report so that there is no side effect on the
5646 following operand parsing. */
5647 init_operand_error_report ();
5648
5649 /* Fill inst. */
5650 result = parse_operands (str + len, opcode)
5651 && programmer_friendly_fixup (&inst);
5652 gas_assert (result);
5653 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5654 NULL, NULL, insn_sequence);
5655 gas_assert (!result);
5656
5657 /* Find the most matched qualifier sequence. */
5658 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5659 gas_assert (qlf_idx > -1);
5660
5661 /* Assign the qualifiers. */
5662 assign_qualifier_sequence (inst_base,
5663 opcode->qualifiers_list[qlf_idx]);
5664
5665 /* Print the hint. */
5666 output_info (_(" did you mean this?"));
5667 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5668 print_operands (buf, opcode, inst_base->operands);
5669 output_info (_(" %s"), buf);
5670
5671 /* Print out other variant(s) if there is any. */
5672 if (qlf_idx != 0
5673 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5674 output_info (_(" other valid variant(s):"));
5675
5676 /* For each pattern. */
5677 qualifiers_list = opcode->qualifiers_list;
5678 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5679 {
5680 /* Most opcodes have far fewer patterns in the list.
5681 The first NIL qualifier indicates the end of the list. */
5682 if (empty_qualifier_sequence_p (*qualifiers_list))
5683 break;
5684
5685 if (i != qlf_idx)
5686 {
5687 /* Mnemonics name. */
5688 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5689
5690 /* Assign the qualifiers. */
5691 assign_qualifier_sequence (inst_base, *qualifiers_list);
5692
5693 /* Print instruction. */
5694 print_operands (buf, opcode, inst_base->operands);
5695
5696 output_info (_(" %s"), buf);
5697 }
5698 }
5699 }
5700 break;
5701
5702 case AARCH64_OPDE_UNTIED_IMMS:
5703 handler (_("operand %d must have the same immediate value "
5704 "as operand 1 -- `%s'"),
5705 detail->index + 1, str);
5706 break;
5707
5708 case AARCH64_OPDE_UNTIED_OPERAND:
5709 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5710 detail->index + 1, str);
5711 break;
5712
5713 case AARCH64_OPDE_OUT_OF_RANGE:
5714 if (detail->data[0].i != detail->data[1].i)
5715 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5716 msg ? msg : _("immediate value"),
5717 detail->data[0].i, detail->data[1].i, idx + 1, str);
5718 else
5719 handler (_("%s must be %d at operand %d -- `%s'"),
5720 msg ? msg : _("immediate value"),
5721 detail->data[0].i, idx + 1, str);
5722 break;
5723
5724 case AARCH64_OPDE_REG_LIST:
5725 if (detail->data[0].i == 1)
5726 handler (_("invalid number of registers in the list; "
5727 "only 1 register is expected at operand %d -- `%s'"),
5728 idx + 1, str);
5729 else
5730 handler (_("invalid number of registers in the list; "
5731 "%d registers are expected at operand %d -- `%s'"),
5732 detail->data[0].i, idx + 1, str);
5733 break;
5734
5735 case AARCH64_OPDE_UNALIGNED:
5736 handler (_("immediate value must be a multiple of "
5737 "%d at operand %d -- `%s'"),
5738 detail->data[0].i, idx + 1, str);
5739 break;
5740
5741 default:
5742 gas_assert (0);
5743 break;
5744 }
5745 }
5746
5747 /* Process and output the error message about the operand mismatch.
5748
5749 When this function is called, the operand error information has
5750 been collected for an assembly line and there will be multiple
5751 errors in the case of multiple instruction templates; output the
5752 error message that most closely describes the problem.
5753
5754 The errors to be printed can be filtered to print either all errors
5755 or only non-fatal errors. This distinction has to be made because
5756 the error buffer may already be filled with fatal errors we don't want to
5757 print due to the different instruction templates. */
5758
5759 static void
5760 output_operand_error_report (char *str, bool non_fatal_only)
5761 {
5762 enum aarch64_operand_error_kind kind;
5763 operand_error_record *curr;
5764 operand_error_record *head = operand_error_report.head;
5765 operand_error_record *record = NULL;
5766
5767 /* No error to report. */
5768 if (head == NULL)
5769 return;
5770
5771 gas_assert (head != NULL && operand_error_report.tail != NULL);
5772
5773 /* Only one error. */
5774 if (head == operand_error_report.tail)
5775 {
5776 /* If the only error is a non-fatal one and we don't want to print it,
5777 just exit. */
5778 if (!non_fatal_only || head->detail.non_fatal)
5779 {
5780 DEBUG_TRACE ("single opcode entry with error kind: %s",
5781 operand_mismatch_kind_names[head->detail.kind]);
5782 output_operand_error_record (head, str);
5783 }
5784 return;
5785 }
5786
5787 /* Find the error kind of the highest severity. */
5788 DEBUG_TRACE ("multiple opcode entries with error kind");
5789 kind = AARCH64_OPDE_NIL;
5790 for (curr = head; curr != NULL; curr = curr->next)
5791 {
5792 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5793 if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
5794 {
5795 DEBUG_TRACE ("\t%s [%x, %x, %x]",
5796 operand_mismatch_kind_names[curr->detail.kind],
5797 curr->detail.data[0].i, curr->detail.data[1].i,
5798 curr->detail.data[2].i);
5799 }
5800 else
5801 {
5802 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5803 }
5804 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5805 && (!non_fatal_only || curr->detail.non_fatal))
5806 kind = curr->detail.kind;
5807 }
5808
5809 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5810
5811 /* Pick up one of the errors of KIND to report. */
5812 for (curr = head; curr != NULL; curr = curr->next)
5813 {
5814 /* If we don't want to print non-fatal errors then don't consider them
5815 at all. */
5816 if (curr->detail.kind != kind
5817 || (non_fatal_only && !curr->detail.non_fatal))
5818 continue;
5819 /* If there are multiple errors, pick up the one with the highest
5820 mismatching operand index. In the case of multiple errors with
5821 the equally highest operand index, pick up the first one or the
5822 first one with non-NULL error message. */
5823 if (!record || curr->detail.index > record->detail.index)
5824 record = curr;
5825 else if (curr->detail.index == record->detail.index
5826 && !record->detail.error)
5827 {
5828 if (curr->detail.error)
5829 record = curr;
5830 else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
5831 {
5832 record->detail.data[0].i |= curr->detail.data[0].i;
5833 record->detail.data[1].i |= curr->detail.data[1].i;
5834 record->detail.data[2].i |= curr->detail.data[2].i;
5835 DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
5836 operand_mismatch_kind_names[kind],
5837 curr->detail.data[0].i, curr->detail.data[1].i,
5838 curr->detail.data[2].i);
5839 }
5840 }
5841 }
5842
5843 /* The way errors are collected in the back-end is a bit non-intuitive. But
5844 essentially, because each operand template is tried recursively, you may
5845 always have errors collected from the previously tried operands. These are
5846 usually skipped if there is one successful match. However, now with the
5847 non-fatal errors we have to ignore those previously collected hard errors
5848 when we're only interested in printing the non-fatal ones. This condition
5849 prevents us from printing errors that are not appropriate: we did
5850 match a template, but it also produced warnings that it wants to print. */
5851 if (non_fatal_only && !record)
5852 return;
5853
5854 gas_assert (record);
5855 DEBUG_TRACE ("Pick up error kind %s to report",
5856 operand_mismatch_kind_names[kind]);
5857
5858 /* Output. */
5859 output_operand_error_record (record, str);
5860 }
5861 \f
5862 /* Write an AArch64 instruction to buf - always little-endian. */
5863 static void
5864 put_aarch64_insn (char *buf, uint32_t insn)
5865 {
5866 unsigned char *where = (unsigned char *) buf;
5867 where[0] = insn;
5868 where[1] = insn >> 8;
5869 where[2] = insn >> 16;
5870 where[3] = insn >> 24;
5871 }
5872
5873 static uint32_t
5874 get_aarch64_insn (char *buf)
5875 {
5876 unsigned char *where = (unsigned char *) buf;
5877 uint32_t result;
5878 result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5879 | ((uint32_t) where[3] << 24)));
5880 return result;
5881 }
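
/* Illustrative sketch (not part of the assembler itself): the two helpers
   above round-trip an encoding independently of host endianness.  Assuming
   BUF points at four writable bytes,

     put_aarch64_insn (buf, 0x91000421);        "add x1, x1, #1"

   stores the bytes 0x21 0x04 0x00 0x91 in that order, and
   get_aarch64_insn (buf) then returns 0x91000421 on any host.  */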
5882
5883 static void
5884 output_inst (struct aarch64_inst *new_inst)
5885 {
5886 char *to = NULL;
5887
5888 to = frag_more (INSN_SIZE);
5889
5890 frag_now->tc_frag_data.recorded = 1;
5891
5892 put_aarch64_insn (to, inst.base.value);
5893
5894 if (inst.reloc.type != BFD_RELOC_UNUSED)
5895 {
5896 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5897 INSN_SIZE, &inst.reloc.exp,
5898 inst.reloc.pc_rel,
5899 inst.reloc.type);
5900 DEBUG_TRACE ("Prepared relocation fix up");
5901 /* Don't check the addend value against the instruction size,
5902 that's the job of our code in md_apply_fix(). */
5903 fixp->fx_no_overflow = 1;
5904 if (new_inst != NULL)
5905 fixp->tc_fix_data.inst = new_inst;
5906 if (aarch64_gas_internal_fixup_p ())
5907 {
5908 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5909 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5910 fixp->fx_addnumber = inst.reloc.flags;
5911 }
5912 }
5913
5914 dwarf2_emit_insn (INSN_SIZE);
5915 }
5916
5917 /* Link together opcodes of the same name. */
5918
5919 struct templates
5920 {
5921 const aarch64_opcode *opcode;
5922 struct templates *next;
5923 };
5924
5925 typedef struct templates templates;
5926
5927 static templates *
5928 lookup_mnemonic (const char *start, int len)
5929 {
5930 templates *templ = NULL;
5931
5932 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5933 return templ;
5934 }
5935
5936 /* Subroutine of md_assemble, responsible for looking up the primary
5937 opcode from the mnemonic the user wrote. BASE points to the beginning
5938 of the mnemonic, DOT points to the first '.' within the mnemonic
5939 (if any) and END points to the end of the mnemonic. */
5940
5941 static templates *
5942 opcode_lookup (char *base, char *dot, char *end)
5943 {
5944 const aarch64_cond *cond;
5945 char condname[16];
5946 int len;
5947
5948 if (dot == end)
5949 return NULL;
5950
5951 inst.cond = COND_ALWAYS;
5952
5953 /* Handle a possible condition. */
5954 if (dot)
5955 {
5956 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5957 if (!cond)
5958 return NULL;
5959 inst.cond = cond->value;
5960 len = dot - base;
5961 }
5962 else
5963 len = end - base;
5964
5965 if (inst.cond == COND_ALWAYS)
5966 {
5967 /* Look for unaffixed mnemonic. */
5968 return lookup_mnemonic (base, len);
5969 }
5970 else if (len <= 13)
5971 {
5972 /* Append ".c" to the mnemonic if conditional. */
5973 memcpy (condname, base, len);
5974 memcpy (condname + len, ".c", 2);
5975 base = condname;
5976 len += 2;
5977 return lookup_mnemonic (base, len);
5978 }
5979
5980 return NULL;
5981 }
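
/* For illustration (a sketch of the lookup above, not extra behaviour):
   for the mnemonic "b.eq", BASE points at 'b', DOT at '.' and END past 'q';
   "eq" is found in aarch64_cond_hsh, inst.cond is set to the EQ condition
   value, and the opcode table is searched with the key "b.c" (the base
   mnemonic plus the ".c" suffix used for conditional templates).  A
   mnemonic without a dot, e.g. "ret", is looked up unchanged with
   inst.cond left as COND_ALWAYS.  */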
5982
5983 /* Process an optional operand that has been omitted from the assembly line.
5984 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5985 instruction's opcode entry while IDX is the index of this omitted
5986 operand. */
5987
5988 static void
5989 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5990 int idx, aarch64_opnd_info *operand)
5991 {
5992 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5993 gas_assert (optional_operand_p (opcode, idx));
5994 gas_assert (!operand->present);
5995
5996 switch (type)
5997 {
5998 case AARCH64_OPND_Rd:
5999 case AARCH64_OPND_Rn:
6000 case AARCH64_OPND_Rm:
6001 case AARCH64_OPND_Rt:
6002 case AARCH64_OPND_Rt2:
6003 case AARCH64_OPND_Rt_LS64:
6004 case AARCH64_OPND_Rt_SP:
6005 case AARCH64_OPND_Rs:
6006 case AARCH64_OPND_Ra:
6007 case AARCH64_OPND_Rt_SYS:
6008 case AARCH64_OPND_Rd_SP:
6009 case AARCH64_OPND_Rn_SP:
6010 case AARCH64_OPND_Rm_SP:
6011 case AARCH64_OPND_Fd:
6012 case AARCH64_OPND_Fn:
6013 case AARCH64_OPND_Fm:
6014 case AARCH64_OPND_Fa:
6015 case AARCH64_OPND_Ft:
6016 case AARCH64_OPND_Ft2:
6017 case AARCH64_OPND_Sd:
6018 case AARCH64_OPND_Sn:
6019 case AARCH64_OPND_Sm:
6020 case AARCH64_OPND_Va:
6021 case AARCH64_OPND_Vd:
6022 case AARCH64_OPND_Vn:
6023 case AARCH64_OPND_Vm:
6024 case AARCH64_OPND_VdD1:
6025 case AARCH64_OPND_VnD1:
6026 operand->reg.regno = default_value;
6027 break;
6028
6029 case AARCH64_OPND_Ed:
6030 case AARCH64_OPND_En:
6031 case AARCH64_OPND_Em:
6032 case AARCH64_OPND_Em16:
6033 case AARCH64_OPND_SM3_IMM2:
6034 operand->reglane.regno = default_value;
6035 break;
6036
6037 case AARCH64_OPND_IDX:
6038 case AARCH64_OPND_BIT_NUM:
6039 case AARCH64_OPND_IMMR:
6040 case AARCH64_OPND_IMMS:
6041 case AARCH64_OPND_SHLL_IMM:
6042 case AARCH64_OPND_IMM_VLSL:
6043 case AARCH64_OPND_IMM_VLSR:
6044 case AARCH64_OPND_CCMP_IMM:
6045 case AARCH64_OPND_FBITS:
6046 case AARCH64_OPND_UIMM4:
6047 case AARCH64_OPND_UIMM3_OP1:
6048 case AARCH64_OPND_UIMM3_OP2:
6049 case AARCH64_OPND_IMM:
6050 case AARCH64_OPND_IMM_2:
6051 case AARCH64_OPND_WIDTH:
6052 case AARCH64_OPND_UIMM7:
6053 case AARCH64_OPND_NZCV:
6054 case AARCH64_OPND_SVE_PATTERN:
6055 case AARCH64_OPND_SVE_PRFOP:
6056 operand->imm.value = default_value;
6057 break;
6058
6059 case AARCH64_OPND_SVE_PATTERN_SCALED:
6060 operand->imm.value = default_value;
6061 operand->shifter.kind = AARCH64_MOD_MUL;
6062 operand->shifter.amount = 1;
6063 break;
6064
6065 case AARCH64_OPND_EXCEPTION:
6066 inst.reloc.type = BFD_RELOC_UNUSED;
6067 break;
6068
6069 case AARCH64_OPND_BARRIER_ISB:
6070 operand->barrier = aarch64_barrier_options + default_value;
6071 break;
6072
6073 case AARCH64_OPND_BTI_TARGET:
6074 operand->hint_option = aarch64_hint_options + default_value;
6075 break;
6076
6077 default:
6078 break;
6079 }
6080 }
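
/* For example (illustrative only): "ret" omits its optional Rn operand, so
   the code above fills it in from the opcode table's default value and the
   instruction assembles as if "ret x30" had been written.  An omitted ISB
   option or BTI target is likewise replaced by its table default.  */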
6081
6082 /* Process the relocation type for move wide instructions.
6083 Return TRUE on success; otherwise return FALSE. */
6084
6085 static bool
6086 process_movw_reloc_info (void)
6087 {
6088 int is32;
6089 unsigned shift;
6090
6091 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
6092
6093 if (inst.base.opcode->op == OP_MOVK)
6094 switch (inst.reloc.type)
6095 {
6096 case BFD_RELOC_AARCH64_MOVW_G0_S:
6097 case BFD_RELOC_AARCH64_MOVW_G1_S:
6098 case BFD_RELOC_AARCH64_MOVW_G2_S:
6099 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6100 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6101 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6102 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6103 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6104 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6105 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6106 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6107 set_syntax_error
6108 (_("the specified relocation type is not allowed for MOVK"));
6109 return false;
6110 default:
6111 break;
6112 }
6113
6114 switch (inst.reloc.type)
6115 {
6116 case BFD_RELOC_AARCH64_MOVW_G0:
6117 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6118 case BFD_RELOC_AARCH64_MOVW_G0_S:
6119 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6120 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6121 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
6122 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6123 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6124 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6125 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6126 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6127 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6128 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6129 shift = 0;
6130 break;
6131 case BFD_RELOC_AARCH64_MOVW_G1:
6132 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6133 case BFD_RELOC_AARCH64_MOVW_G1_S:
6134 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6135 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6136 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
6137 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6138 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6139 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6140 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6141 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6142 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6143 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6144 shift = 16;
6145 break;
6146 case BFD_RELOC_AARCH64_MOVW_G2:
6147 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6148 case BFD_RELOC_AARCH64_MOVW_G2_S:
6149 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6150 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
6151 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6152 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6153 if (is32)
6154 {
6155 set_fatal_syntax_error
6156 (_("the specified relocation type is not allowed for 32-bit "
6157 "register"));
6158 return false;
6159 }
6160 shift = 32;
6161 break;
6162 case BFD_RELOC_AARCH64_MOVW_G3:
6163 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6164 if (is32)
6165 {
6166 set_fatal_syntax_error
6167 (_("the specified relocation type is not allowed for 32-bit "
6168 "register"));
6169 return false;
6170 }
6171 shift = 48;
6172 break;
6173 default:
6174 /* More cases should be added when more MOVW-related relocation types
6175 are supported in GAS. */
6176 gas_assert (aarch64_gas_internal_fixup_p ());
6177 /* The shift amount should have already been set by the parser. */
6178 return true;
6179 }
6180 inst.base.operands[1].shifter.amount = shift;
6181 return true;
6182 }
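
/* Illustrative examples of the mapping above (a non-exhaustive sketch):

     movz x0, #:abs_g1:sym     -> BFD_RELOC_AARCH64_MOVW_G1, shift 16
     movk x0, #:abs_g2_nc:sym  -> BFD_RELOC_AARCH64_MOVW_G2_NC, shift 32
     movz w0, #:abs_g2:sym     -> rejected: G2 is not allowed on a
                                  32-bit register
     movk x0, #:abs_g0_s:sym   -> rejected: signed MOVW relocations are
                                  not allowed for MOVK.  */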
6183
6184 /* A primitive base-2 log calculator. */
6185
6186 static inline unsigned int
6187 get_logsz (unsigned int size)
6188 {
6189 const unsigned char ls[16] =
6190 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
6191 if (size > 16)
6192 {
6193 gas_assert (0);
6194 return -1;
6195 }
6196 gas_assert (ls[size - 1] != (unsigned char)-1);
6197 return ls[size - 1];
6198 }
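
/* A quick sanity table for the helper above: get_logsz (1) == 0,
   get_logsz (2) == 1, get_logsz (4) == 2, get_logsz (8) == 3 and
   get_logsz (16) == 4; any other size trips the assertions.  */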
6199
6200 /* Determine and return the real reloc type code for an instruction
6201 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6202
6203 static inline bfd_reloc_code_real_type
6204 ldst_lo12_determine_real_reloc_type (void)
6205 {
6206 unsigned logsz, max_logsz;
6207 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
6208 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
6209
6210 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
6211 {
6212 BFD_RELOC_AARCH64_LDST8_LO12,
6213 BFD_RELOC_AARCH64_LDST16_LO12,
6214 BFD_RELOC_AARCH64_LDST32_LO12,
6215 BFD_RELOC_AARCH64_LDST64_LO12,
6216 BFD_RELOC_AARCH64_LDST128_LO12
6217 },
6218 {
6219 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
6220 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
6221 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
6222 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
6223 BFD_RELOC_AARCH64_NONE
6224 },
6225 {
6226 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
6227 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
6228 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
6229 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
6230 BFD_RELOC_AARCH64_NONE
6231 },
6232 {
6233 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
6234 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
6235 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
6236 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
6237 BFD_RELOC_AARCH64_NONE
6238 },
6239 {
6240 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
6241 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
6242 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
6243 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
6244 BFD_RELOC_AARCH64_NONE
6245 }
6246 };
6247
6248 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6249 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6250 || (inst.reloc.type
6251 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6252 || (inst.reloc.type
6253 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6254 || (inst.reloc.type
6255 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
6256 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
6257
6258 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
6259 opd1_qlf =
6260 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
6261 1, opd0_qlf, 0);
6262 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
6263
6264 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
6265
6266 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6267 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
6268 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
6269 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
6270 max_logsz = 3;
6271 else
6272 max_logsz = 4;
6273
6274 if (logsz > max_logsz)
6275 {
6276 /* See PR 27904 for an example of this. */
6277 set_fatal_syntax_error
6278 (_("relocation qualifier does not match instruction size"));
6279 return BFD_RELOC_AARCH64_NONE;
6280 }
6281
6282 /* In reloc.c, these pseudo relocation types should be defined in the same
6283 order as in the reloc_ldst_lo12 array above, because the array index
6284 calculation below relies on this. */
6285 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
6286 }
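
/* For illustration: "ldr x0, [x1, #:lo12:sym]" accesses 8 bytes, so LOGSZ
   is 3 and the pseudo reloc BFD_RELOC_AARCH64_LDST_LO12 is narrowed to
   BFD_RELOC_AARCH64_LDST64_LO12.  A 128-bit access such as
   "ldr q0, [x1, #:tprel_lo12:sym]" would instead be rejected, since
   MAX_LOGSZ is only 3 for the TLS variants (see PR 27904 above).  */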
6287
6288 /* Check whether a register list REGINFO is valid. The registers must be
6289 numbered in increasing order (modulo 32), in increments of one or two.
6290
6291 If ACCEPT_ALTERNATE is non-zero, the register numbers must instead be in
6292 increments of two.
6293
6294 Return FALSE if such a register list is invalid, otherwise return TRUE. */
6295
6296 static bool
6297 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
6298 {
6299 uint32_t i, nb_regs, prev_regno, incr;
6300
6301 nb_regs = 1 + (reginfo & 0x3);
6302 reginfo >>= 2;
6303 prev_regno = reginfo & 0x1f;
6304 incr = accept_alternate ? 2 : 1;
6305
6306 for (i = 1; i < nb_regs; ++i)
6307 {
6308 uint32_t curr_regno;
6309 reginfo >>= 5;
6310 curr_regno = reginfo & 0x1f;
6311 if (curr_regno != ((prev_regno + incr) & 0x1f))
6312 return false;
6313 prev_regno = curr_regno;
6314 }
6315
6316 return true;
6317 }
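
/* Sketch of the REGINFO layout that the function above reads (and that the
   register-list parser produces): bits [1:0] hold the register count minus
   one, bits [6:2] the first register number, and each subsequent register
   number occupies the next five bits.  For example "{v1.4s-v3.4s}" is
   represented as (3 << 12) | (2 << 7) | (1 << 2) | 2, and "{v31.4s, v0.4s}"
   is accepted because the numbering wraps modulo 32.  */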
6318
6319 /* Generic instruction operand parser. This does no encoding and no
6320 semantic validation; it merely squirrels values away in the inst
6321 structure. Returns TRUE or FALSE depending on whether the
6322 specified grammar matched. */
6323
6324 static bool
6325 parse_operands (char *str, const aarch64_opcode *opcode)
6326 {
6327 int i;
6328 char *backtrack_pos = 0;
6329 const enum aarch64_opnd *operands = opcode->operands;
6330 aarch64_reg_type imm_reg_type;
6331
6332 clear_error ();
6333 skip_whitespace (str);
6334
6335 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6336 AARCH64_FEATURE_SVE
6337 | AARCH64_FEATURE_SVE2))
6338 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6339 else
6340 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6341
6342 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6343 {
6344 int64_t val;
6345 const reg_entry *reg;
6346 int comma_skipped_p = 0;
6347 struct vector_type_el vectype;
6348 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6349 aarch64_opnd_info *info = &inst.base.operands[i];
6350 aarch64_reg_type reg_type;
6351
6352 DEBUG_TRACE ("parse operand %d", i);
6353
6354 /* Assign the operand code. */
6355 info->type = operands[i];
6356
6357 if (optional_operand_p (opcode, i))
6358 {
6359 /* Remember where we are in case we need to backtrack. */
6360 gas_assert (!backtrack_pos);
6361 backtrack_pos = str;
6362 }
6363
6364 /* Expect a comma between operands; the backtrack mechanism will take
6365 care of omitted optional operands. */
6366 if (i > 0 && ! skip_past_char (&str, ','))
6367 {
6368 set_syntax_error (_("comma expected between operands"));
6369 goto failure;
6370 }
6371 else
6372 comma_skipped_p = 1;
6373
6374 switch (operands[i])
6375 {
6376 case AARCH64_OPND_Rd:
6377 case AARCH64_OPND_Rn:
6378 case AARCH64_OPND_Rm:
6379 case AARCH64_OPND_Rt:
6380 case AARCH64_OPND_Rt2:
6381 case AARCH64_OPND_Rs:
6382 case AARCH64_OPND_Ra:
6383 case AARCH64_OPND_Rt_LS64:
6384 case AARCH64_OPND_Rt_SYS:
6385 case AARCH64_OPND_PAIRREG:
6386 case AARCH64_OPND_SVE_Rm:
6387 po_int_fp_reg_or_fail (REG_TYPE_R_Z);
6388
6389 /* In LS64 load/store instructions the Rt register number must be even
6390 and <= 22. */
6391 if (operands[i] == AARCH64_OPND_Rt_LS64)
6392 {
6393 /* We've already checked that this is a valid register.
6394 This checks that the register number (Rt) is not undefined for LS64
6395 instructions:
6396 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6397 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6398 {
6399 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6400 goto failure;
6401 }
6402 }
6403 break;
6404
6405 case AARCH64_OPND_Rd_SP:
6406 case AARCH64_OPND_Rn_SP:
6407 case AARCH64_OPND_Rt_SP:
6408 case AARCH64_OPND_SVE_Rn_SP:
6409 case AARCH64_OPND_Rm_SP:
6410 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6411 break;
6412
6413 case AARCH64_OPND_Rm_EXT:
6414 case AARCH64_OPND_Rm_SFT:
6415 po_misc_or_fail (parse_shifter_operand
6416 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6417 ? SHIFTED_ARITH_IMM
6418 : SHIFTED_LOGIC_IMM)));
6419 if (!info->shifter.operator_present)
6420 {
6421 /* Default to LSL if not present. Libopcodes prefers shifter
6422 kind to be explicit. */
6423 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6424 info->shifter.kind = AARCH64_MOD_LSL;
6425 /* For Rm_EXT, libopcodes will carry out a further check on whether
6426 or not the stack pointer is used in the instruction (recall that
6427 "the extend operator is not optional unless at least one of
6428 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6429 }
6430 break;
6431
6432 case AARCH64_OPND_Fd:
6433 case AARCH64_OPND_Fn:
6434 case AARCH64_OPND_Fm:
6435 case AARCH64_OPND_Fa:
6436 case AARCH64_OPND_Ft:
6437 case AARCH64_OPND_Ft2:
6438 case AARCH64_OPND_Sd:
6439 case AARCH64_OPND_Sn:
6440 case AARCH64_OPND_Sm:
6441 case AARCH64_OPND_SVE_VZn:
6442 case AARCH64_OPND_SVE_Vd:
6443 case AARCH64_OPND_SVE_Vm:
6444 case AARCH64_OPND_SVE_Vn:
6445 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6446 break;
6447
6448 case AARCH64_OPND_SVE_Pd:
6449 case AARCH64_OPND_SVE_Pg3:
6450 case AARCH64_OPND_SVE_Pg4_5:
6451 case AARCH64_OPND_SVE_Pg4_10:
6452 case AARCH64_OPND_SVE_Pg4_16:
6453 case AARCH64_OPND_SVE_Pm:
6454 case AARCH64_OPND_SVE_Pn:
6455 case AARCH64_OPND_SVE_Pt:
6456 case AARCH64_OPND_SME_Pm:
6457 reg_type = REG_TYPE_PN;
6458 goto vector_reg;
6459
6460 case AARCH64_OPND_SVE_Za_5:
6461 case AARCH64_OPND_SVE_Za_16:
6462 case AARCH64_OPND_SVE_Zd:
6463 case AARCH64_OPND_SVE_Zm_5:
6464 case AARCH64_OPND_SVE_Zm_16:
6465 case AARCH64_OPND_SVE_Zn:
6466 case AARCH64_OPND_SVE_Zt:
6467 reg_type = REG_TYPE_ZN;
6468 goto vector_reg;
6469
6470 case AARCH64_OPND_Va:
6471 case AARCH64_OPND_Vd:
6472 case AARCH64_OPND_Vn:
6473 case AARCH64_OPND_Vm:
6474 reg_type = REG_TYPE_VN;
6475 vector_reg:
6476 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6477 if (!reg)
6478 goto failure;
6479 if (vectype.defined & NTA_HASINDEX)
6480 goto failure;
6481
6482 info->reg.regno = reg->number;
6483 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6484 && vectype.type == NT_invtype)
6485 /* Unqualified Pn and Zn registers are allowed in certain
6486 contexts. Rely on F_STRICT qualifier checking to catch
6487 invalid uses. */
6488 info->qualifier = AARCH64_OPND_QLF_NIL;
6489 else
6490 {
6491 info->qualifier = vectype_to_qualifier (&vectype);
6492 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6493 goto failure;
6494 }
6495 break;
6496
6497 case AARCH64_OPND_VdD1:
6498 case AARCH64_OPND_VnD1:
6499 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6500 if (!reg)
6501 goto failure;
6502 if (vectype.type != NT_d || vectype.index != 1)
6503 {
6504 set_fatal_syntax_error
6505 (_("the top half of a 128-bit FP/SIMD register is expected"));
6506 goto failure;
6507 }
6508 info->reg.regno = reg->number;
6509 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6510 here; it is correct for the purpose of encoding/decoding since
6511 only the register number is explicitly encoded in the related
6512 instructions, although this appears a bit hacky. */
6513 info->qualifier = AARCH64_OPND_QLF_S_D;
6514 break;
6515
6516 case AARCH64_OPND_SVE_Zm3_INDEX:
6517 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6518 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6519 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6520 case AARCH64_OPND_SVE_Zm4_INDEX:
6521 case AARCH64_OPND_SVE_Zn_INDEX:
6522 reg_type = REG_TYPE_ZN;
6523 goto vector_reg_index;
6524
6525 case AARCH64_OPND_Ed:
6526 case AARCH64_OPND_En:
6527 case AARCH64_OPND_Em:
6528 case AARCH64_OPND_Em16:
6529 case AARCH64_OPND_SM3_IMM2:
6530 reg_type = REG_TYPE_VN;
6531 vector_reg_index:
6532 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6533 if (!reg)
6534 goto failure;
6535 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6536 goto failure;
6537
6538 info->reglane.regno = reg->number;
6539 info->reglane.index = vectype.index;
6540 info->qualifier = vectype_to_qualifier (&vectype);
6541 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6542 goto failure;
6543 break;
6544
6545 case AARCH64_OPND_SVE_ZnxN:
6546 case AARCH64_OPND_SVE_ZtxN:
6547 reg_type = REG_TYPE_ZN;
6548 goto vector_reg_list;
6549
6550 case AARCH64_OPND_LVn:
6551 case AARCH64_OPND_LVt:
6552 case AARCH64_OPND_LVt_AL:
6553 case AARCH64_OPND_LEt:
6554 reg_type = REG_TYPE_VN;
6555 vector_reg_list:
6556 if (reg_type == REG_TYPE_ZN
6557 && get_opcode_dependent_value (opcode) == 1
6558 && *str != '{')
6559 {
6560 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6561 if (!reg)
6562 goto failure;
6563 info->reglist.first_regno = reg->number;
6564 info->reglist.num_regs = 1;
6565 }
6566 else
6567 {
6568 val = parse_vector_reg_list (&str, reg_type, &vectype);
6569 if (val == PARSE_FAIL)
6570 goto failure;
6571
6572 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6573 {
6574 set_fatal_syntax_error (_("invalid register list"));
6575 goto failure;
6576 }
6577
6578 if (vectype.width != 0 && *str != ',')
6579 {
6580 set_fatal_syntax_error
6581 (_("expected element type rather than vector type"));
6582 goto failure;
6583 }
6584
6585 info->reglist.first_regno = (val >> 2) & 0x1f;
6586 info->reglist.num_regs = (val & 0x3) + 1;
6587 }
6588 if (operands[i] == AARCH64_OPND_LEt)
6589 {
6590 if (!(vectype.defined & NTA_HASINDEX))
6591 goto failure;
6592 info->reglist.has_index = 1;
6593 info->reglist.index = vectype.index;
6594 }
6595 else
6596 {
6597 if (vectype.defined & NTA_HASINDEX)
6598 goto failure;
6599 if (!(vectype.defined & NTA_HASTYPE))
6600 {
6601 if (reg_type == REG_TYPE_ZN)
6602 set_fatal_syntax_error (_("missing type suffix"));
6603 goto failure;
6604 }
6605 }
6606 info->qualifier = vectype_to_qualifier (&vectype);
6607 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6608 goto failure;
6609 break;
6610
6611 case AARCH64_OPND_CRn:
6612 case AARCH64_OPND_CRm:
6613 {
6614 char prefix = *(str++);
6615 if (prefix != 'c' && prefix != 'C')
6616 goto failure;
6617
6618 po_imm_nc_or_fail ();
6619 if (val > 15)
6620 {
6621 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6622 goto failure;
6623 }
6624 info->qualifier = AARCH64_OPND_QLF_CR;
6625 info->imm.value = val;
6626 break;
6627 }
6628
6629 case AARCH64_OPND_SHLL_IMM:
6630 case AARCH64_OPND_IMM_VLSR:
6631 po_imm_or_fail (1, 64);
6632 info->imm.value = val;
6633 break;
6634
6635 case AARCH64_OPND_CCMP_IMM:
6636 case AARCH64_OPND_SIMM5:
6637 case AARCH64_OPND_FBITS:
6638 case AARCH64_OPND_TME_UIMM16:
6639 case AARCH64_OPND_UIMM4:
6640 case AARCH64_OPND_UIMM4_ADDG:
6641 case AARCH64_OPND_UIMM10:
6642 case AARCH64_OPND_UIMM3_OP1:
6643 case AARCH64_OPND_UIMM3_OP2:
6644 case AARCH64_OPND_IMM_VLSL:
6645 case AARCH64_OPND_IMM:
6646 case AARCH64_OPND_IMM_2:
6647 case AARCH64_OPND_WIDTH:
6648 case AARCH64_OPND_SVE_INV_LIMM:
6649 case AARCH64_OPND_SVE_LIMM:
6650 case AARCH64_OPND_SVE_LIMM_MOV:
6651 case AARCH64_OPND_SVE_SHLIMM_PRED:
6652 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6653 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6654 case AARCH64_OPND_SVE_SHRIMM_PRED:
6655 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6656 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6657 case AARCH64_OPND_SVE_SIMM5:
6658 case AARCH64_OPND_SVE_SIMM5B:
6659 case AARCH64_OPND_SVE_SIMM6:
6660 case AARCH64_OPND_SVE_SIMM8:
6661 case AARCH64_OPND_SVE_UIMM3:
6662 case AARCH64_OPND_SVE_UIMM7:
6663 case AARCH64_OPND_SVE_UIMM8:
6664 case AARCH64_OPND_SVE_UIMM8_53:
6665 case AARCH64_OPND_IMM_ROT1:
6666 case AARCH64_OPND_IMM_ROT2:
6667 case AARCH64_OPND_IMM_ROT3:
6668 case AARCH64_OPND_SVE_IMM_ROT1:
6669 case AARCH64_OPND_SVE_IMM_ROT2:
6670 case AARCH64_OPND_SVE_IMM_ROT3:
6671 case AARCH64_OPND_CSSC_SIMM8:
6672 case AARCH64_OPND_CSSC_UIMM8:
6673 po_imm_nc_or_fail ();
6674 info->imm.value = val;
6675 break;
6676
6677 case AARCH64_OPND_SVE_AIMM:
6678 case AARCH64_OPND_SVE_ASIMM:
6679 po_imm_nc_or_fail ();
6680 info->imm.value = val;
6681 skip_whitespace (str);
6682 if (skip_past_comma (&str))
6683 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6684 else
6685 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6686 break;
6687
6688 case AARCH64_OPND_SVE_PATTERN:
6689 po_enum_or_fail (aarch64_sve_pattern_array);
6690 info->imm.value = val;
6691 break;
6692
6693 case AARCH64_OPND_SVE_PATTERN_SCALED:
6694 po_enum_or_fail (aarch64_sve_pattern_array);
6695 info->imm.value = val;
6696 if (skip_past_comma (&str)
6697 && !parse_shift (&str, info, SHIFTED_MUL))
6698 goto failure;
6699 if (!info->shifter.operator_present)
6700 {
6701 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6702 info->shifter.kind = AARCH64_MOD_MUL;
6703 info->shifter.amount = 1;
6704 }
6705 break;
6706
6707 case AARCH64_OPND_SVE_PRFOP:
6708 po_enum_or_fail (aarch64_sve_prfop_array);
6709 info->imm.value = val;
6710 break;
6711
6712 case AARCH64_OPND_UIMM7:
6713 po_imm_or_fail (0, 127);
6714 info->imm.value = val;
6715 break;
6716
6717 case AARCH64_OPND_IDX:
6718 case AARCH64_OPND_MASK:
6719 case AARCH64_OPND_BIT_NUM:
6720 case AARCH64_OPND_IMMR:
6721 case AARCH64_OPND_IMMS:
6722 po_imm_or_fail (0, 63);
6723 info->imm.value = val;
6724 break;
6725
6726 case AARCH64_OPND_IMM0:
6727 po_imm_nc_or_fail ();
6728 if (val != 0)
6729 {
6730 set_fatal_syntax_error (_("immediate zero expected"));
6731 goto failure;
6732 }
6733 info->imm.value = 0;
6734 break;
6735
6736 case AARCH64_OPND_FPIMM0:
6737 {
6738 int qfloat;
6739 bool res1 = false, res2 = false;
6740 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6741 it is probably not worth the effort to support it. */
6742 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6743 imm_reg_type))
6744 && (error_p ()
6745 || !(res2 = parse_constant_immediate (&str, &val,
6746 imm_reg_type))))
6747 goto failure;
6748 if ((res1 && qfloat == 0) || (res2 && val == 0))
6749 {
6750 info->imm.value = 0;
6751 info->imm.is_fp = 1;
6752 break;
6753 }
6754 set_fatal_syntax_error (_("immediate zero expected"));
6755 goto failure;
6756 }
6757
6758 case AARCH64_OPND_IMM_MOV:
6759 {
6760 char *saved = str;
6761 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6762 reg_name_p (str, REG_TYPE_VN))
6763 goto failure;
6764 str = saved;
6765 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6766 GE_OPT_PREFIX, REJECT_ABSENT));
6767 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6768 later. fix_mov_imm_insn will try to determine a machine
6769 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6770 message if the immediate cannot be moved by a single
6771 instruction. */
6772 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6773 inst.base.operands[i].skip = 1;
6774 }
6775 break;
6776
6777 case AARCH64_OPND_SIMD_IMM:
6778 case AARCH64_OPND_SIMD_IMM_SFT:
6779 if (! parse_big_immediate (&str, &val, imm_reg_type))
6780 goto failure;
6781 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6782 /* addr_off_p */ 0,
6783 /* need_libopcodes_p */ 1,
6784 /* skip_p */ 1);
6785 /* Parse shift.
6786 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6787 shift, we don't check it here; we leave the checking to
6788 the libopcodes (operand_general_constraint_met_p). By
6789 doing this, we achieve better diagnostics. */
6790 if (skip_past_comma (&str)
6791 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6792 goto failure;
6793 if (!info->shifter.operator_present
6794 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6795 {
6796 /* Default to LSL if not present. Libopcodes prefers shifter
6797 kind to be explicit. */
6798 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6799 info->shifter.kind = AARCH64_MOD_LSL;
6800 }
6801 break;
6802
6803 case AARCH64_OPND_FPIMM:
6804 case AARCH64_OPND_SIMD_FPIMM:
6805 case AARCH64_OPND_SVE_FPIMM8:
6806 {
6807 int qfloat;
6808 bool dp_p;
6809
6810 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6811 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6812 || !aarch64_imm_float_p (qfloat))
6813 {
6814 if (!error_p ())
6815 set_fatal_syntax_error (_("invalid floating-point"
6816 " constant"));
6817 goto failure;
6818 }
6819 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6820 inst.base.operands[i].imm.is_fp = 1;
6821 }
6822 break;
6823
6824 case AARCH64_OPND_SVE_I1_HALF_ONE:
6825 case AARCH64_OPND_SVE_I1_HALF_TWO:
6826 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6827 {
6828 int qfloat;
6829 bool dp_p;
6830
6831 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6832 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6833 {
6834 if (!error_p ())
6835 set_fatal_syntax_error (_("invalid floating-point"
6836 " constant"));
6837 goto failure;
6838 }
6839 inst.base.operands[i].imm.value = qfloat;
6840 inst.base.operands[i].imm.is_fp = 1;
6841 }
6842 break;
6843
6844 case AARCH64_OPND_LIMM:
6845 po_misc_or_fail (parse_shifter_operand (&str, info,
6846 SHIFTED_LOGIC_IMM));
6847 if (info->shifter.operator_present)
6848 {
6849 set_fatal_syntax_error
6850 (_("shift not allowed for bitmask immediate"));
6851 goto failure;
6852 }
6853 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6854 /* addr_off_p */ 0,
6855 /* need_libopcodes_p */ 1,
6856 /* skip_p */ 1);
6857 break;
6858
6859 case AARCH64_OPND_AIMM:
6860 if (opcode->op == OP_ADD)
6861 /* ADD may have relocation types. */
6862 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6863 SHIFTED_ARITH_IMM));
6864 else
6865 po_misc_or_fail (parse_shifter_operand (&str, info,
6866 SHIFTED_ARITH_IMM));
6867 switch (inst.reloc.type)
6868 {
6869 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6870 info->shifter.amount = 12;
6871 break;
6872 case BFD_RELOC_UNUSED:
6873 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6874 if (info->shifter.kind != AARCH64_MOD_NONE)
6875 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6876 inst.reloc.pc_rel = 0;
6877 break;
6878 default:
6879 break;
6880 }
6881 info->imm.value = 0;
6882 if (!info->shifter.operator_present)
6883 {
6884 /* Default to LSL if not present. Libopcodes prefers shifter
6885 kind to be explicit. */
6886 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6887 info->shifter.kind = AARCH64_MOD_LSL;
6888 }
6889 break;
6890
6891 case AARCH64_OPND_HALF:
6892 {
6893 /* #<imm16> or relocation. */
6894 int internal_fixup_p;
6895 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6896 if (internal_fixup_p)
6897 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6898 skip_whitespace (str);
6899 if (skip_past_comma (&str))
6900 {
6901 /* {, LSL #<shift>} */
6902 if (! aarch64_gas_internal_fixup_p ())
6903 {
6904 set_fatal_syntax_error (_("can't mix relocation modifier "
6905 "with explicit shift"));
6906 goto failure;
6907 }
6908 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6909 }
6910 else
6911 inst.base.operands[i].shifter.amount = 0;
6912 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6913 inst.base.operands[i].imm.value = 0;
6914 if (! process_movw_reloc_info ())
6915 goto failure;
6916 }
6917 break;
6918
6919 case AARCH64_OPND_EXCEPTION:
6920 case AARCH64_OPND_UNDEFINED:
6921 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6922 imm_reg_type));
6923 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6924 /* addr_off_p */ 0,
6925 /* need_libopcodes_p */ 0,
6926 /* skip_p */ 1);
6927 break;
6928
6929 case AARCH64_OPND_NZCV:
6930 {
6931 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6932 if (nzcv != NULL)
6933 {
6934 str += 4;
6935 info->imm.value = nzcv->value;
6936 break;
6937 }
6938 po_imm_or_fail (0, 15);
6939 info->imm.value = val;
6940 }
6941 break;
6942
6943 case AARCH64_OPND_COND:
6944 case AARCH64_OPND_COND1:
6945 {
6946 char *start = str;
6947 do
6948 str++;
6949 while (ISALPHA (*str));
6950 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6951 if (info->cond == NULL)
6952 {
6953 set_syntax_error (_("invalid condition"));
6954 goto failure;
6955 }
6956 else if (operands[i] == AARCH64_OPND_COND1
6957 && (info->cond->value & 0xe) == 0xe)
6958 {
6959 /* Do not allow AL or NV. */
6960 set_default_error ();
6961 goto failure;
6962 }
6963 }
6964 break;
6965
6966 case AARCH64_OPND_ADDR_ADRP:
6967 po_misc_or_fail (parse_adrp (&str));
6968 /* Clear the value as operand needs to be relocated. */
6969 info->imm.value = 0;
6970 break;
6971
6972 case AARCH64_OPND_ADDR_PCREL14:
6973 case AARCH64_OPND_ADDR_PCREL19:
6974 case AARCH64_OPND_ADDR_PCREL21:
6975 case AARCH64_OPND_ADDR_PCREL26:
6976 po_misc_or_fail (parse_address (&str, info));
6977 if (!info->addr.pcrel)
6978 {
6979 set_syntax_error (_("invalid pc-relative address"));
6980 goto failure;
6981 }
6982 if (inst.gen_lit_pool
6983 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6984 {
6985 /* Only permit "=value" in the literal load instructions.
6986 The literal will be generated by programmer_friendly_fixup. */
6987 set_syntax_error (_("invalid use of \"=immediate\""));
6988 goto failure;
6989 }
6990 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6991 {
6992 set_syntax_error (_("unrecognized relocation suffix"));
6993 goto failure;
6994 }
6995 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6996 {
6997 info->imm.value = inst.reloc.exp.X_add_number;
6998 inst.reloc.type = BFD_RELOC_UNUSED;
6999 }
7000 else
7001 {
7002 info->imm.value = 0;
7003 if (inst.reloc.type == BFD_RELOC_UNUSED)
7004 switch (opcode->iclass)
7005 {
7006 case compbranch:
7007 case condbranch:
7008 /* e.g. CBZ or B.COND */
7009 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7010 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7011 break;
7012 case testbranch:
7013 /* e.g. TBZ */
7014 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7015 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7016 break;
7017 case branch_imm:
7018 /* e.g. B or BL */
7019 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7020 inst.reloc.type =
7021 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7022 : BFD_RELOC_AARCH64_JUMP26;
7023 break;
7024 case loadlit:
7025 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7026 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7027 break;
7028 case pcreladdr:
7029 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7030 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7031 break;
7032 default:
7033 gas_assert (0);
7034 abort ();
7035 }
7036 inst.reloc.pc_rel = 1;
7037 }
7038 break;
7039
7040 case AARCH64_OPND_ADDR_SIMPLE:
7041 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7042 {
7043 /* [<Xn|SP>{, #<simm>}] */
7044 char *start = str;
7045 /* First use the normal address-parsing routines, to get
7046 the usual syntax errors. */
7047 po_misc_or_fail (parse_address (&str, info));
7048 if (info->addr.pcrel || info->addr.offset.is_reg
7049 || !info->addr.preind || info->addr.postind
7050 || info->addr.writeback)
7051 {
7052 set_syntax_error (_("invalid addressing mode"));
7053 goto failure;
7054 }
7055
7056 /* Then retry, matching the specific syntax of these addresses. */
7057 str = start;
7058 po_char_or_fail ('[');
7059 po_reg_or_fail (REG_TYPE_R64_SP);
7060 /* Accept optional ", #0". */
7061 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7062 && skip_past_char (&str, ','))
7063 {
7064 skip_past_char (&str, '#');
7065 if (! skip_past_char (&str, '0'))
7066 {
7067 set_fatal_syntax_error
7068 (_("the optional immediate offset can only be 0"));
7069 goto failure;
7070 }
7071 }
7072 po_char_or_fail (']');
7073 break;
7074 }
7075
7076 case AARCH64_OPND_ADDR_REGOFF:
7077 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7078 po_misc_or_fail (parse_address (&str, info));
7079 regoff_addr:
7080 if (info->addr.pcrel || !info->addr.offset.is_reg
7081 || !info->addr.preind || info->addr.postind
7082 || info->addr.writeback)
7083 {
7084 set_syntax_error (_("invalid addressing mode"));
7085 goto failure;
7086 }
7087 if (!info->shifter.operator_present)
7088 {
7089 /* Default to LSL if not present. Libopcodes prefers shifter
7090 kind to be explicit. */
7091 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7092 info->shifter.kind = AARCH64_MOD_LSL;
7093 }
7094 /* Qualifier to be deduced by libopcodes. */
7095 break;
7096
7097 case AARCH64_OPND_ADDR_SIMM7:
7098 po_misc_or_fail (parse_address (&str, info));
7099 if (info->addr.pcrel || info->addr.offset.is_reg
7100 || (!info->addr.preind && !info->addr.postind))
7101 {
7102 set_syntax_error (_("invalid addressing mode"));
7103 goto failure;
7104 }
7105 if (inst.reloc.type != BFD_RELOC_UNUSED)
7106 {
7107 set_syntax_error (_("relocation not allowed"));
7108 goto failure;
7109 }
7110 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7111 /* addr_off_p */ 1,
7112 /* need_libopcodes_p */ 1,
7113 /* skip_p */ 0);
7114 break;
7115
7116 case AARCH64_OPND_ADDR_SIMM9:
7117 case AARCH64_OPND_ADDR_SIMM9_2:
7118 case AARCH64_OPND_ADDR_SIMM11:
7119 case AARCH64_OPND_ADDR_SIMM13:
7120 po_misc_or_fail (parse_address (&str, info));
7121 if (info->addr.pcrel || info->addr.offset.is_reg
7122 || (!info->addr.preind && !info->addr.postind)
7123 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7124 && info->addr.writeback))
7125 {
7126 set_syntax_error (_("invalid addressing mode"));
7127 goto failure;
7128 }
7129 if (inst.reloc.type != BFD_RELOC_UNUSED)
7130 {
7131 set_syntax_error (_("relocation not allowed"));
7132 goto failure;
7133 }
7134 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7135 /* addr_off_p */ 1,
7136 /* need_libopcodes_p */ 1,
7137 /* skip_p */ 0);
7138 break;
7139
7140 case AARCH64_OPND_ADDR_SIMM10:
7141 case AARCH64_OPND_ADDR_OFFSET:
7142 po_misc_or_fail (parse_address (&str, info));
7143 if (info->addr.pcrel || info->addr.offset.is_reg
7144 || !info->addr.preind || info->addr.postind)
7145 {
7146 set_syntax_error (_("invalid addressing mode"));
7147 goto failure;
7148 }
7149 if (inst.reloc.type != BFD_RELOC_UNUSED)
7150 {
7151 set_syntax_error (_("relocation not allowed"));
7152 goto failure;
7153 }
7154 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7155 /* addr_off_p */ 1,
7156 /* need_libopcodes_p */ 1,
7157 /* skip_p */ 0);
7158 break;
7159
7160 case AARCH64_OPND_ADDR_UIMM12:
7161 po_misc_or_fail (parse_address (&str, info));
7162 if (info->addr.pcrel || info->addr.offset.is_reg
7163 || !info->addr.preind || info->addr.writeback)
7164 {
7165 set_syntax_error (_("invalid addressing mode"));
7166 goto failure;
7167 }
7168 if (inst.reloc.type == BFD_RELOC_UNUSED)
7169 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7170 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7171 || (inst.reloc.type
7172 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7173 || (inst.reloc.type
7174 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7175 || (inst.reloc.type
7176 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7177 || (inst.reloc.type
7178 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7179 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7180 /* Leave qualifier to be determined by libopcodes. */
7181 break;
7182
7183 case AARCH64_OPND_SIMD_ADDR_POST:
7184 /* [<Xn|SP>], <Xm|#<amount>> */
7185 po_misc_or_fail (parse_address (&str, info));
7186 if (!info->addr.postind || !info->addr.writeback)
7187 {
7188 set_syntax_error (_("invalid addressing mode"));
7189 goto failure;
7190 }
7191 if (!info->addr.offset.is_reg)
7192 {
7193 if (inst.reloc.exp.X_op == O_constant)
7194 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7195 else
7196 {
7197 set_fatal_syntax_error
7198 (_("writeback value must be an immediate constant"));
7199 goto failure;
7200 }
7201 }
7202 /* No qualifier. */
7203 break;
7204
7205 case AARCH64_OPND_SME_SM_ZA:
7206 /* { SM | ZA } */
7207 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7208 {
7209 set_syntax_error (_("unknown or missing PSTATE field name"));
7210 goto failure;
7211 }
7212 info->reg.regno = val;
7213 break;
7214
7215 case AARCH64_OPND_SME_PnT_Wm_imm:
7216 if (!parse_dual_indexed_reg (&str, REG_TYPE_PN,
7217 &info->indexed_za, &qualifier, 0))
7218 goto failure;
7219 info->qualifier = qualifier;
7220 break;
7221
7222 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7223 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7224 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7225 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7226 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7227 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7228 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7229 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7230 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7231 case AARCH64_OPND_SVE_ADDR_RI_U6:
7232 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7233 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7234 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7235 /* [X<n>{, #imm, MUL VL}]
7236 [X<n>{, #imm}]
7237 but recognizing SVE registers. */
7238 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7239 &offset_qualifier));
7240 if (base_qualifier != AARCH64_OPND_QLF_X)
7241 {
7242 set_syntax_error (_("invalid addressing mode"));
7243 goto failure;
7244 }
7245 sve_regimm:
7246 if (info->addr.pcrel || info->addr.offset.is_reg
7247 || !info->addr.preind || info->addr.writeback)
7248 {
7249 set_syntax_error (_("invalid addressing mode"));
7250 goto failure;
7251 }
7252 if (inst.reloc.type != BFD_RELOC_UNUSED
7253 || inst.reloc.exp.X_op != O_constant)
7254 {
7255 /* Make sure this has priority over
7256 "invalid addressing mode". */
7257 set_fatal_syntax_error (_("constant offset required"));
7258 goto failure;
7259 }
7260 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7261 break;
7262
7263 case AARCH64_OPND_SVE_ADDR_R:
7264 /* [<Xn|SP>{, <R><m>}]
7265 but recognizing SVE registers. */
7266 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7267 &offset_qualifier));
7268 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7269 {
7270 offset_qualifier = AARCH64_OPND_QLF_X;
7271 info->addr.offset.is_reg = 1;
7272 info->addr.offset.regno = 31;
7273 }
7274 else if (base_qualifier != AARCH64_OPND_QLF_X
7275 || offset_qualifier != AARCH64_OPND_QLF_X)
7276 {
7277 set_syntax_error (_("invalid addressing mode"));
7278 goto failure;
7279 }
7280 goto regoff_addr;
7281
7282 case AARCH64_OPND_SVE_ADDR_RR:
7283 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7284 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7285 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7286 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7287 case AARCH64_OPND_SVE_ADDR_RX:
7288 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7289 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7290 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7291 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7292 but recognizing SVE registers. */
7293 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7294 &offset_qualifier));
7295 if (base_qualifier != AARCH64_OPND_QLF_X
7296 || offset_qualifier != AARCH64_OPND_QLF_X)
7297 {
7298 set_syntax_error (_("invalid addressing mode"));
7299 goto failure;
7300 }
7301 goto regoff_addr;
7302
7303 case AARCH64_OPND_SVE_ADDR_RZ:
7304 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7305 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7306 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7307 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7308 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7309 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7310 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7311 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7312 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7313 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7314 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7315 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7316 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7317 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7318 &offset_qualifier));
7319 if (base_qualifier != AARCH64_OPND_QLF_X
7320 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7321 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7322 {
7323 set_syntax_error (_("invalid addressing mode"));
7324 goto failure;
7325 }
7326 info->qualifier = offset_qualifier;
7327 goto regoff_addr;
7328
7329 case AARCH64_OPND_SVE_ADDR_ZX:
7330 /* [Zn.<T>{, <Xm>}]. */
7331 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7332 &offset_qualifier));
7333 /* Things to check:
7334 base_qualifier either S_S or S_D
7335 offset_qualifier must be X
7336 */
7337 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7338 && base_qualifier != AARCH64_OPND_QLF_S_D)
7339 || offset_qualifier != AARCH64_OPND_QLF_X)
7340 {
7341 set_syntax_error (_("invalid addressing mode"));
7342 goto failure;
7343 }
7344 info->qualifier = base_qualifier;
7345 if (!info->addr.offset.is_reg || info->addr.pcrel
7346 || !info->addr.preind || info->addr.writeback
7347 || info->shifter.operator_present != 0)
7348 {
7349 set_syntax_error (_("invalid addressing mode"));
7350 goto failure;
7351 }
7352 info->shifter.kind = AARCH64_MOD_LSL;
7353 break;
7354
7356 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7357 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7358 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7359 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7360 /* [Z<n>.<T>{, #imm}] */
7361 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7362 &offset_qualifier));
7363 if (base_qualifier != AARCH64_OPND_QLF_S_S
7364 && base_qualifier != AARCH64_OPND_QLF_S_D)
7365 {
7366 set_syntax_error (_("invalid addressing mode"));
7367 goto failure;
7368 }
7369 info->qualifier = base_qualifier;
7370 goto sve_regimm;
7371
7372 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7373 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7374 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7375 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7376 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7377
7378 We don't reject:
7379
7380 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7381
7382 here since we get better error messages by leaving it to
7383 the qualifier checking routines. */
7384 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7385 &offset_qualifier));
7386 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7387 && base_qualifier != AARCH64_OPND_QLF_S_D)
7388 || offset_qualifier != base_qualifier)
7389 {
7390 set_syntax_error (_("invalid addressing mode"));
7391 goto failure;
7392 }
7393 info->qualifier = base_qualifier;
7394 goto regoff_addr;
7395
7396 case AARCH64_OPND_SYSREG:
7397 {
7398 uint32_t sysreg_flags;
7399 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7400 &sysreg_flags)) == PARSE_FAIL)
7401 {
7402 set_syntax_error (_("unknown or missing system register name"));
7403 goto failure;
7404 }
7405 inst.base.operands[i].sysreg.value = val;
7406 inst.base.operands[i].sysreg.flags = sysreg_flags;
7407 break;
7408 }
7409
7410 case AARCH64_OPND_PSTATEFIELD:
7411 {
7412 uint32_t sysreg_flags;
7413 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7414 &sysreg_flags)) == PARSE_FAIL)
7415 {
7416 set_syntax_error (_("unknown or missing PSTATE field name"));
7417 goto failure;
7418 }
7419 inst.base.operands[i].pstatefield = val;
7420 inst.base.operands[i].sysreg.flags = sysreg_flags;
7421 break;
7422 }
7423
7424 case AARCH64_OPND_SYSREG_IC:
7425 inst.base.operands[i].sysins_op =
7426 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7427 goto sys_reg_ins;
7428
7429 case AARCH64_OPND_SYSREG_DC:
7430 inst.base.operands[i].sysins_op =
7431 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7432 goto sys_reg_ins;
7433
7434 case AARCH64_OPND_SYSREG_AT:
7435 inst.base.operands[i].sysins_op =
7436 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7437 goto sys_reg_ins;
7438
7439 case AARCH64_OPND_SYSREG_SR:
7440 inst.base.operands[i].sysins_op =
7441 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7442 goto sys_reg_ins;
7443
7444 case AARCH64_OPND_SYSREG_TLBI:
7445 inst.base.operands[i].sysins_op =
7446 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7447 sys_reg_ins:
7448 if (inst.base.operands[i].sysins_op == NULL)
7449 {
7450 set_fatal_syntax_error ( _("unknown or missing operation name"));
7451 goto failure;
7452 }
7453 break;
7454
7455 case AARCH64_OPND_BARRIER:
7456 case AARCH64_OPND_BARRIER_ISB:
7457 val = parse_barrier (&str);
7458 if (val != PARSE_FAIL
7459 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7460 {
7461 /* ISB only accepts the option name 'sy'. */
7462 set_syntax_error
7463 (_("the specified option is not accepted in ISB"));
7464 /* Turn off backtrack as this optional operand is present. */
7465 backtrack_pos = 0;
7466 goto failure;
7467 }
7468 if (val != PARSE_FAIL
7469 && operands[i] == AARCH64_OPND_BARRIER)
7470 {
7471 /* Regular barriers accept options CRm (C0-C15).
7472 DSB nXS barrier variant accepts values > 15. */
7473 if (val < 0 || val > 15)
7474 {
7475 set_syntax_error (_("the specified option is not accepted in DSB"));
7476 goto failure;
7477 }
7478 }
7479 /* This is an extension to accept a 0..15 immediate. */
7480 if (val == PARSE_FAIL)
7481 po_imm_or_fail (0, 15);
7482 info->barrier = aarch64_barrier_options + val;
7483 break;
7484
7485 case AARCH64_OPND_BARRIER_DSB_NXS:
7486 val = parse_barrier (&str);
7487 if (val != PARSE_FAIL)
7488 {
7489 /* The DSB nXS barrier variant accepts only <option>nXS qualifiers. */
7490 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7491 {
7492 set_syntax_error (_("the specified option is not accepted in DSB"));
7493 /* Turn off backtrack as this optional operand is present. */
7494 backtrack_pos = 0;
7495 goto failure;
7496 }
7497 }
7498 else
7499 {
7500 /* The DSB nXS barrier variant accepts a 5-bit unsigned immediate, with
7501 possible values 16, 20, 24 or 28, encoded in val<3:2>. */
7502 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7503 goto failure;
7504 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7505 {
7506 set_syntax_error (_("immediate value must be 16, 20, 24 or 28"));
7507 goto failure;
7508 }
7509 }
7510 /* Option index is encoded as 2-bit value in val<3:2>. */
7511 val = (val >> 2) - 4;
7512 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7513 break;
7514
7515 case AARCH64_OPND_PRFOP:
7516 val = parse_pldop (&str);
7517 /* This is an extension to accept a 0..31 immediate. */
7518 if (val == PARSE_FAIL)
7519 po_imm_or_fail (0, 31);
7520 inst.base.operands[i].prfop = aarch64_prfops + val;
7521 break;
7522
7523 case AARCH64_OPND_BARRIER_PSB:
7524 val = parse_barrier_psb (&str, &(info->hint_option));
7525 if (val == PARSE_FAIL)
7526 goto failure;
7527 break;
7528
7529 case AARCH64_OPND_BTI_TARGET:
7530 val = parse_bti_operand (&str, &(info->hint_option));
7531 if (val == PARSE_FAIL)
7532 goto failure;
7533 break;
7534
7535 case AARCH64_OPND_SME_ZAda_2b:
7536 case AARCH64_OPND_SME_ZAda_3b:
7537 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7538 if (!reg)
7539 goto failure;
7540 info->reg.regno = reg->number;
7541 info->qualifier = qualifier;
7542 break;
7543
7544 case AARCH64_OPND_SME_ZA_HV_idx_src:
7545 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7546 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7547 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7548 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7549 &info->indexed_za,
7550 &qualifier)
7551 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7552 &info->indexed_za, &qualifier, 0))
7553 goto failure;
7554 info->qualifier = qualifier;
7555 break;
7556
7557 case AARCH64_OPND_SME_list_of_64bit_tiles:
7558 val = parse_sme_list_of_64bit_tiles (&str);
7559 if (val == PARSE_FAIL)
7560 goto failure;
7561 info->imm.value = val;
7562 break;
7563
7564 case AARCH64_OPND_SME_ZA_array:
7565 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7566 &info->indexed_za, &qualifier, 0))
7567 goto failure;
7568 info->qualifier = qualifier;
7569 break;
7570
7571 case AARCH64_OPND_MOPS_ADDR_Rd:
7572 case AARCH64_OPND_MOPS_ADDR_Rs:
7573 po_char_or_fail ('[');
7574 if (!parse_x0_to_x30 (&str, info))
7575 goto failure;
7576 po_char_or_fail (']');
7577 po_char_or_fail ('!');
7578 break;
7579
7580 case AARCH64_OPND_MOPS_WB_Rn:
7581 if (!parse_x0_to_x30 (&str, info))
7582 goto failure;
7583 po_char_or_fail ('!');
7584 break;
7585
7586 default:
7587 as_fatal (_("unhandled operand code %d"), operands[i]);
7588 }
7589
7590 /* If we get here, this operand was successfully parsed. */
7591 inst.base.operands[i].present = 1;
7592 continue;
7593
7594 failure:
7595 /* The parse routine should already have set the error, but in case
7596 not, set a default one here. */
7597 if (! error_p ())
7598 set_default_error ();
7599
7600 if (! backtrack_pos)
7601 goto parse_operands_return;
7602
7603 {
7604 /* We reach here because this operand is marked as optional, and
7605 either no operand was supplied or the operand was supplied but it
7606 was syntactically incorrect. In the latter case we report an
7607 error. In the former case we perform a few more checks before
7608 dropping through to the code to insert the default operand. */
7609
7610 char *tmp = backtrack_pos;
7611 char endchar = END_OF_INSN;
7612
7613 if (i != (aarch64_num_of_operands (opcode) - 1))
7614 endchar = ',';
7615 skip_past_char (&tmp, ',');
7616
7617 if (*tmp != endchar)
7618 /* The user has supplied an operand in the wrong format. */
7619 goto parse_operands_return;
7620
7621 /* Make sure there is not a comma before the optional operand.
7622 For example the fifth operand of 'sys' is optional:
7623
7624 sys #0,c0,c0,#0, <--- wrong
7625 sys #0,c0,c0,#0 <--- correct. */
7626 if (comma_skipped_p && i && endchar == END_OF_INSN)
7627 {
7628 set_fatal_syntax_error
7629 (_("unexpected comma before the omitted optional operand"));
7630 goto parse_operands_return;
7631 }
7632 }
7633
7634 /* Reaching here means we are dealing with an optional operand that is
7635 omitted from the assembly line. */
7636 gas_assert (optional_operand_p (opcode, i));
7637 info->present = 0;
7638 process_omitted_operand (operands[i], opcode, i, info);
7639
7640 /* Try again, skipping the optional operand at backtrack_pos. */
7641 str = backtrack_pos;
7642 backtrack_pos = 0;
7643
7644 /* Clear any error record after the omitted optional operand has been
7645 successfully handled. */
7646 clear_error ();
7647 }
7648
7649 /* Check if we have parsed all the operands. */
7650 if (*str != '\0' && ! error_p ())
7651 {
7652 /* Set I to the index of the last present operand; this is
7653 for the purpose of diagnostics. */
7654 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7655 ;
7656 set_fatal_syntax_error
7657 (_("unexpected characters following instruction"));
7658 }
7659
7660 parse_operands_return:
7661
7662 if (error_p ())
7663 {
7664 inst.parsing_error.index = i;
7665 DEBUG_TRACE ("parsing FAIL: %s - %s",
7666 operand_mismatch_kind_names[inst.parsing_error.kind],
7667 inst.parsing_error.error);
7668 /* Record the operand error properly; this is useful when there
7669 are multiple instruction templates for a mnemonic name, so that
7670 later on, we can select the error that most closely describes
7671 the problem. */
7672 record_operand_error_info (opcode, &inst.parsing_error);
7673 return false;
7674 }
7675 else
7676 {
7677 DEBUG_TRACE ("parsing SUCCESS");
7678 return true;
7679 }
7680 }
7681
7682 /* Apply some fix-ups to provide programmer-friendly features while
7683 keeping libopcodes happy, i.e. libopcodes only accepts
7684 the preferred architectural syntax.
7685 Return FALSE if there is any failure; otherwise return TRUE. */
7686
7687 static bool
7688 programmer_friendly_fixup (aarch64_instruction *instr)
7689 {
7690 aarch64_inst *base = &instr->base;
7691 const aarch64_opcode *opcode = base->opcode;
7692 enum aarch64_op op = opcode->op;
7693 aarch64_opnd_info *operands = base->operands;
7694
7695 DEBUG_TRACE ("enter");
7696
7697 switch (opcode->iclass)
7698 {
7699 case testbranch:
7700 /* TBNZ Xn|Wn, #uimm6, label
7701 Test and Branch Not Zero: conditionally jumps to label if bit number
7702 uimm6 in register Xn is not zero. The bit number implies the width of
7703 the register, which may be written and should be disassembled as Wn if
7704 uimm is less than 32. */
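      /* For example, "tbnz w0, #12, lab" is accepted here and encodes
	 identically to "tbnz x0, #12, lab", whereas "tbnz w0, #33, lab" is
	 rejected because bit 33 does not exist in a 32-bit register.  */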
7705 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7706 {
7707 if (operands[1].imm.value >= 32)
7708 {
7709 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7710 0, 31);
7711 return false;
7712 }
7713 operands[0].qualifier = AARCH64_OPND_QLF_X;
7714 }
7715 break;
7716 case loadlit:
7717 /* LDR Wt, label | =value
7718        As a convenience, assemblers will typically permit the notation
7719 "=value" in conjunction with the pc-relative literal load instructions
7720 to automatically place an immediate value or symbolic address in a
7721 nearby literal pool and generate a hidden label which references it.
7722 ISREG has been set to 0 in the case of =value. */
7723 if (instr->gen_lit_pool
7724 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7725 {
7726 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7727 if (op == OP_LDRSW_LIT)
7728 size = 4;
7729 if (instr->reloc.exp.X_op != O_constant
7730 && instr->reloc.exp.X_op != O_big
7731 && instr->reloc.exp.X_op != O_symbol)
7732 {
7733 record_operand_error (opcode, 1,
7734 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7735 _("constant expression expected"));
7736 return false;
7737 }
7738 if (! add_to_lit_pool (&instr->reloc.exp, size))
7739 {
7740 record_operand_error (opcode, 1,
7741 AARCH64_OPDE_OTHER_ERROR,
7742 _("literal pool insertion failed"));
7743 return false;
7744 }
7745 }
7746 break;
7747 case log_shift:
7748 case bitfield:
7749 /* UXT[BHW] Wd, Wn
7750        Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
7751        for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
7752        encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7753 A programmer-friendly assembler should accept a destination Xd in
7754 place of Wd, however that is not the preferred form for disassembly.
7755 */
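      /* For example, "uxtb x1, w2" is accepted here and assembled as if it
	 had been written "uxtb w1, w2".  */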
7756 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7757 && operands[1].qualifier == AARCH64_OPND_QLF_W
7758 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7759 operands[0].qualifier = AARCH64_OPND_QLF_W;
7760 break;
7761
7762 case addsub_ext:
7763 {
7764 /* In the 64-bit form, the final register operand is written as Wm
7765 for all but the (possibly omitted) UXTX/LSL and SXTX
7766 operators.
7767 As a programmer-friendly assembler, we accept e.g.
7768 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7769 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7770 int idx = aarch64_operand_index (opcode->operands,
7771 AARCH64_OPND_Rm_EXT);
7772 gas_assert (idx == 1 || idx == 2);
7773 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7774 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7775 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7776 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7777 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7778 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7779 }
7780 break;
7781
7782 default:
7783 break;
7784 }
7785
7786 DEBUG_TRACE ("exit with SUCCESS");
7787 return true;
7788 }
7789
7790 /* Check for loads and stores that will cause unpredictable behavior. */
7791
7792 static void
7793 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
7794 {
7795 aarch64_inst *base = &instr->base;
7796 const aarch64_opcode *opcode = base->opcode;
7797 const aarch64_opnd_info *opnds = base->operands;
7798 switch (opcode->iclass)
7799 {
7800 case ldst_pos:
7801 case ldst_imm9:
7802 case ldst_imm10:
7803 case ldst_unscaled:
7804 case ldst_unpriv:
7805 /* Loading/storing the base register is unpredictable if writeback. */
7806 if ((aarch64_get_operand_class (opnds[0].type)
7807 == AARCH64_OPND_CLASS_INT_REG)
7808 && opnds[0].reg.regno == opnds[1].addr.base_regno
7809 && opnds[1].addr.base_regno != REG_SP
7810 /* Exempt STG/STZG/ST2G/STZ2G. */
7811 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7812 && opnds[1].addr.writeback)
7813 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7814 break;
7815
7816 case ldstpair_off:
7817 case ldstnapair_offs:
7818 case ldstpair_indexed:
7819 /* Loading/storing the base register is unpredictable if writeback. */
7820 if ((aarch64_get_operand_class (opnds[0].type)
7821 == AARCH64_OPND_CLASS_INT_REG)
7822 && (opnds[0].reg.regno == opnds[2].addr.base_regno
7823 || opnds[1].reg.regno == opnds[2].addr.base_regno)
7824 && opnds[2].addr.base_regno != REG_SP
7825 /* Exempt STGP. */
7826 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7827 && opnds[2].addr.writeback)
7828 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7829 /* Load operations must load different registers. */
7830 if ((opcode->opcode & (1 << 22))
7831 && opnds[0].reg.regno == opnds[1].reg.regno)
7832 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7833 break;
7834
7835 case ldstexcl:
7836 if ((aarch64_get_operand_class (opnds[0].type)
7837 == AARCH64_OPND_CLASS_INT_REG)
7838 && (aarch64_get_operand_class (opnds[1].type)
7839 == AARCH64_OPND_CLASS_INT_REG))
7840 {
7841 if ((opcode->opcode & (1 << 22)))
7842 {
7843 /* It is unpredictable if load-exclusive pair with Rt == Rt2. */
7844 if ((opcode->opcode & (1 << 21))
7845 && opnds[0].reg.regno == opnds[1].reg.regno)
7846 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7847 }
7848 else
7849 {
7850 /* Store-Exclusive is unpredictable if Rt == Rs. */
7851 if (opnds[0].reg.regno == opnds[1].reg.regno)
7852 as_warn
7853 (_("unpredictable: identical transfer and status registers"
7854 " --`%s'"),str);
7855
7856 if (opnds[0].reg.regno == opnds[2].reg.regno)
7857 {
7858 if (!(opcode->opcode & (1 << 21)))
7859 /* Store-Exclusive is unpredictable if Rn == Rs. */
7860 as_warn
7861 (_("unpredictable: identical base and status registers"
7862 " --`%s'"),str);
7863 else
7864 /* Store-Exclusive pair is unpredictable if Rt2 == Rs. */
7865 as_warn
7866 (_("unpredictable: "
7867 "identical transfer and status registers"
7868 " --`%s'"),str);
7869 }
7870
7871 /* Store-Exclusive pair is unpredictable if Rn == Rs. */
7872 if ((opcode->opcode & (1 << 21))
7873 && opnds[0].reg.regno == opnds[3].reg.regno
7874 && opnds[3].reg.regno != REG_SP)
7875 as_warn (_("unpredictable: identical base and status registers"
7876 " --`%s'"),str);
7877 }
7878 }
7879 break;
7880
7881 default:
7882 break;
7883 }
7884 }
7885
7886 static void
7887 force_automatic_sequence_close (void)
7888 {
7889 struct aarch64_segment_info_type *tc_seg_info;
7890
7891 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7892 if (tc_seg_info->insn_sequence.instr)
7893 {
7894 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7895 _("previous `%s' sequence has not been closed"),
7896 tc_seg_info->insn_sequence.instr->opcode->name);
7897 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7898 }
7899 }
7900
7901 /* A wrapper function to interface with libopcodes for encoding and to
7902    record the error message if there is any.
7903
7904    Return TRUE on success or if any errors reported were non-fatal;
7905    otherwise return FALSE. */
7905
7906 static bool
7907 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7908 aarch64_insn *code)
7909 {
7910 aarch64_operand_error error_info;
7911 memset (&error_info, '\0', sizeof (error_info));
7912 error_info.kind = AARCH64_OPDE_NIL;
7913 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7914 && !error_info.non_fatal)
7915 return true;
7916
7917 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7918 record_operand_error_info (opcode, &error_info);
7919 return error_info.non_fatal;
7920 }
7921
7922 #ifdef DEBUG_AARCH64
7923 static inline void
7924 dump_opcode_operands (const aarch64_opcode *opcode)
7925 {
7926 int i = 0;
7927 while (opcode->operands[i] != AARCH64_OPND_NIL)
7928 {
7929 aarch64_verbose ("\t\t opnd%d: %s", i,
7930 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7931 ? aarch64_get_operand_name (opcode->operands[i])
7932 : aarch64_get_operand_desc (opcode->operands[i]));
7933 ++i;
7934 }
7935 }
7936 #endif /* DEBUG_AARCH64 */
7937
7938 /* This is the guts of the machine-dependent assembler. STR points to a
7939 machine dependent instruction. This function is supposed to emit
7940 the frags/bytes it assembles to. */
7941
7942 void
7943 md_assemble (char *str)
7944 {
7945 templates *template;
7946 const aarch64_opcode *opcode;
7947 struct aarch64_segment_info_type *tc_seg_info;
7948 aarch64_inst *inst_base;
7949 unsigned saved_cond;
7950
7951 /* Align the previous label if needed. */
7952 if (last_label_seen != NULL)
7953 {
7954 symbol_set_frag (last_label_seen, frag_now);
7955 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7956 S_SET_SEGMENT (last_label_seen, now_seg);
7957 }
7958
7959 /* Update the current insn_sequence from the segment. */
7960 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7961 insn_sequence = &tc_seg_info->insn_sequence;
7962 tc_seg_info->last_file = as_where (&tc_seg_info->last_line);
7963
7964 inst.reloc.type = BFD_RELOC_UNUSED;
7965
7966 DEBUG_TRACE ("\n\n");
7967 DEBUG_TRACE ("==============================");
7968 DEBUG_TRACE ("Enter md_assemble with %s", str);
7969
7970 /* Scan up to the end of the mnemonic, which must end in whitespace,
7971 '.', or end of string. */
7972 char *p = str;
7973 char *dot = 0;
7974 for (; is_part_of_name (*p); p++)
7975 if (*p == '.' && !dot)
7976 dot = p;
7977
7978 if (p == str)
7979 {
7980 as_bad (_("unknown mnemonic -- `%s'"), str);
7981 return;
7982 }
7983
7984 if (!dot && create_register_alias (str, p))
7985 return;
7986
7987 template = opcode_lookup (str, dot, p);
7988 if (!template)
7989 {
7990 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7991 str);
7992 return;
7993 }
7994
7995 skip_whitespace (p);
7996 if (*p == ',')
7997 {
7998 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7999 get_mnemonic_name (str), str);
8000 return;
8001 }
8002
8003 init_operand_error_report ();
8004
8005   /* Sections are assumed to start aligned. In an executable section, there
8006      is no MAP_DATA symbol pending, so we only align the address during the
8007      MAP_DATA --> MAP_INSN transition.
8008      For other sections, this is not guaranteed. */
8009 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
8010 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
8011 frag_align_code (2, 0);
8012
8013 saved_cond = inst.cond;
8014 reset_aarch64_instruction (&inst);
8015 inst.cond = saved_cond;
8016
8017 /* Iterate through all opcode entries with the same mnemonic name. */
8018 do
8019 {
8020 opcode = template->opcode;
8021
8022 DEBUG_TRACE ("opcode %s found", opcode->name);
8023 #ifdef DEBUG_AARCH64
8024 if (debug_dump)
8025 dump_opcode_operands (opcode);
8026 #endif /* DEBUG_AARCH64 */
8027
8028 mapping_state (MAP_INSN);
8029
8030 inst_base = &inst.base;
8031 inst_base->opcode = opcode;
8032
8033 /* Truly conditionally executed instructions, e.g. b.cond. */
8034 if (opcode->flags & F_COND)
8035 {
8036 gas_assert (inst.cond != COND_ALWAYS);
8037 inst_base->cond = get_cond_from_value (inst.cond);
8038 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
8039 }
8040 else if (inst.cond != COND_ALWAYS)
8041 {
8042 	  /* We shouldn't get here: the assembly looks like a conditional
8043 	     instruction but the opcode found is unconditional. */
8044 gas_assert (0);
8045 continue;
8046 }
8047
8048 if (parse_operands (p, opcode)
8049 && programmer_friendly_fixup (&inst)
8050 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
8051 {
8052 /* Check that this instruction is supported for this CPU. */
8053 if (!opcode->avariant
8054 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
8055 {
8056 as_bad (_("selected processor does not support `%s'"), str);
8057 return;
8058 }
8059
8060 warn_unpredictable_ldst (&inst, str);
8061
8062 if (inst.reloc.type == BFD_RELOC_UNUSED
8063 || !inst.reloc.need_libopcodes_p)
8064 output_inst (NULL);
8065 else
8066 {
8067 	      /* If a relocation is generated for the instruction, store the
8068 		 instruction information for the future fix-up. */
8069 struct aarch64_inst *copy;
8070 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
8071 copy = XNEW (struct aarch64_inst);
8072 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
8073 output_inst (copy);
8074 }
8075
8076 /* Issue non-fatal messages if any. */
8077 output_operand_error_report (str, true);
8078 return;
8079 }
8080
8081 template = template->next;
8082 if (template != NULL)
8083 {
8084 reset_aarch64_instruction (&inst);
8085 inst.cond = saved_cond;
8086 }
8087 }
8088 while (template != NULL);
8089
8090 /* Issue the error messages if any. */
8091 output_operand_error_report (str, false);
8092 }
8093
8094 /* Various frobbings of labels and their addresses. */
8095
8096 void
8097 aarch64_start_line_hook (void)
8098 {
8099 last_label_seen = NULL;
8100 }
8101
8102 void
8103 aarch64_frob_label (symbolS * sym)
8104 {
8105 last_label_seen = sym;
8106
8107 dwarf2_emit_label (sym);
8108 }
8109
8110 void
8111 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
8112 {
8113 /* Check to see if we have a block to close. */
8114 force_automatic_sequence_close ();
8115 }
8116
8117 int
8118 aarch64_data_in_code (void)
8119 {
8120 if (startswith (input_line_pointer + 1, "data:"))
8121 {
8122 *input_line_pointer = '/';
8123 input_line_pointer += 5;
8124 *input_line_pointer = 0;
8125 return 1;
8126 }
8127
8128 return 0;
8129 }
8130
8131 char *
8132 aarch64_canonicalize_symbol_name (char *name)
8133 {
8134 int len;
8135
8136 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
8137 *(name + len - 5) = 0;
8138
8139 return name;
8140 }
8141 \f
8142 /* Table of all register names defined by default. The user can
8143 define additional names with .req. Note that all register names
8144 should appear in both upper and lowercase variants. Some registers
8145 also have mixed-case names. */
8146
8147 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8148 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8149 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8150 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8151 #define REGSET16(p,t) \
8152 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8153 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8154 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8155 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8156 #define REGSET16S(p,s,t) \
8157 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8158 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8159 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8160 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8161 #define REGSET31(p,t) \
8162 REGSET16(p, t), \
8163 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8164 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8165 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8166 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8167 #define REGSET(p,t) \
8168 REGSET31(p,t), REGNUM(p,31,t)
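/* For example, REGNUM (w, 0, R_32) expands to REGDEF (w0, 0, R_32), i.e.
   { "w0", 0, REG_TYPE_R_32, true }, and REGSET31 (x, R_64) defines the
   31 entries x0..x30.  */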
8169
8170 /* These go into aarch64_reg_hsh hash-table. */
8171 static const reg_entry reg_names[] = {
8172 /* Integer registers. */
8173 REGSET31 (x, R_64), REGSET31 (X, R_64),
8174 REGSET31 (w, R_32), REGSET31 (W, R_32),
8175
8176 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8177 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8178 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8179 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8180 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8181 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8182
8183 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8184 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8185
8186 /* Floating-point single precision registers. */
8187 REGSET (s, FP_S), REGSET (S, FP_S),
8188
8189 /* Floating-point double precision registers. */
8190 REGSET (d, FP_D), REGSET (D, FP_D),
8191
8192 /* Floating-point half precision registers. */
8193 REGSET (h, FP_H), REGSET (H, FP_H),
8194
8195 /* Floating-point byte precision registers. */
8196 REGSET (b, FP_B), REGSET (B, FP_B),
8197
8198 /* Floating-point quad precision registers. */
8199 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8200
8201 /* FP/SIMD registers. */
8202 REGSET (v, VN), REGSET (V, VN),
8203
8204 /* SVE vector registers. */
8205 REGSET (z, ZN), REGSET (Z, ZN),
8206
8207 /* SVE predicate registers. */
8208 REGSET16 (p, PN), REGSET16 (P, PN),
8209
8210 /* SME ZA. We model this as a register because it acts syntactically
8211 like ZA0H, supporting qualifier suffixes and indexing. */
8212 REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),
8213
8214 /* SME ZA tile registers. */
8215 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8216
8217 /* SME ZA tile registers (horizontal slice). */
8218 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8219
8220 /* SME ZA tile registers (vertical slice). */
8221 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8222 };
8223
8224 #undef REGDEF
8225 #undef REGDEF_ALIAS
8226 #undef REGNUM
8227 #undef REGSET16
8228 #undef REGSET31
8229 #undef REGSET
8230
8231 #define N 1
8232 #define n 0
8233 #define Z 1
8234 #define z 0
8235 #define C 1
8236 #define c 0
8237 #define V 1
8238 #define v 0
8239 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
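/* For example, "NzCv" maps to B (N, z, C, v) == 0b1010, i.e. the N and C
   flags set in the 4-bit nzcv field.  */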
8240 static const asm_nzcv nzcv_names[] = {
8241 {"nzcv", B (n, z, c, v)},
8242 {"nzcV", B (n, z, c, V)},
8243 {"nzCv", B (n, z, C, v)},
8244 {"nzCV", B (n, z, C, V)},
8245 {"nZcv", B (n, Z, c, v)},
8246 {"nZcV", B (n, Z, c, V)},
8247 {"nZCv", B (n, Z, C, v)},
8248 {"nZCV", B (n, Z, C, V)},
8249 {"Nzcv", B (N, z, c, v)},
8250 {"NzcV", B (N, z, c, V)},
8251 {"NzCv", B (N, z, C, v)},
8252 {"NzCV", B (N, z, C, V)},
8253 {"NZcv", B (N, Z, c, v)},
8254 {"NZcV", B (N, Z, c, V)},
8255 {"NZCv", B (N, Z, C, v)},
8256 {"NZCV", B (N, Z, C, V)}
8257 };
8258
8259 #undef N
8260 #undef n
8261 #undef Z
8262 #undef z
8263 #undef C
8264 #undef c
8265 #undef V
8266 #undef v
8267 #undef B
8268 \f
8269 /* MD interface: bits in the object file. */
8270
8271 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8272 for use in the a.out file, and stores them in the array pointed to by buf.
8273 This knows about the endian-ness of the target machine and does
8274    THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
8275    2 (short) and 4 (long). Floating point numbers are put out as a series
8276    of LITTLENUMS (shorts, here at least). */
8277
8278 void
8279 md_number_to_chars (char *buf, valueT val, int n)
8280 {
8281 if (target_big_endian)
8282 number_to_chars_bigendian (buf, val, n);
8283 else
8284 number_to_chars_littleendian (buf, val, n);
8285 }
8286
8287 /* MD interface: Sections. */
8288
8289 /* Estimate the size of a frag before relaxing. Assume everything fits in
8290 4 bytes. */
8291
8292 int
8293 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8294 {
8295 fragp->fr_var = 4;
8296 return 4;
8297 }
8298
8299 /* Round up a section size to the appropriate boundary. */
8300
8301 valueT
8302 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8303 {
8304 return size;
8305 }
8306
8307 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8308 of an rs_align_code fragment.
8309
8310 Here we fill the frag with the appropriate info for padding the
8311 output stream. The resulting frag will consist of a fixed (fr_fix)
8312 and of a repeating (fr_var) part.
8313
8314 The fixed content is always emitted before the repeating content and
8315 these two parts are used as follows in constructing the output:
8316 - the fixed part will be used to align to a valid instruction word
8317 boundary, in case that we start at a misaligned address; as no
8318 executable instruction can live at the misaligned location, we
8319 simply fill with zeros;
8320 - the variable part will be used to cover the remaining padding and
8321 we fill using the AArch64 NOP instruction.
8322
8323    Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8324    enough storage space for up to 3 bytes of padding back to a valid
8325    instruction alignment and exactly 4 bytes to store the NOP pattern. */
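/* For example, if 11 bytes of padding are required, the fixed part is the
   3 zero bytes needed to reach a 4-byte boundary and the variable part is
   the 4-byte NOP pattern, which is repeated to cover the remaining
   8 bytes.  */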
8326
8327 void
8328 aarch64_handle_align (fragS * fragP)
8329 {
8330 /* NOP = d503201f */
8331 /* AArch64 instructions are always little-endian. */
8332 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
8333
8334 int bytes, fix, noop_size;
8335 char *p;
8336
8337 if (fragP->fr_type != rs_align_code)
8338 return;
8339
8340 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
8341 p = fragP->fr_literal + fragP->fr_fix;
8342
8343 #ifdef OBJ_ELF
8344 gas_assert (fragP->tc_frag_data.recorded);
8345 #endif
8346
8347 noop_size = sizeof (aarch64_noop);
8348
8349 fix = bytes & (noop_size - 1);
8350 if (fix)
8351 {
8352 #if defined OBJ_ELF || defined OBJ_COFF
8353 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
8354 #endif
8355 memset (p, 0, fix);
8356 p += fix;
8357 fragP->fr_fix += fix;
8358 }
8359
8360 if (noop_size)
8361 memcpy (p, aarch64_noop, noop_size);
8362 fragP->fr_var = noop_size;
8363 }
8364
8365 /* Perform target specific initialisation of a frag.
8366 Note - despite the name this initialisation is not done when the frag
8367 is created, but only when its type is assigned. A frag can be created
8368 and used a long time before its type is set, so beware of assuming that
8369 this initialisation is performed first. */
8370
8371 #ifndef OBJ_ELF
8372 void
8373 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
8374 int max_chars ATTRIBUTE_UNUSED)
8375 {
8376 }
8377
8378 #else /* OBJ_ELF is defined. */
8379 void
8380 aarch64_init_frag (fragS * fragP, int max_chars)
8381 {
8382 /* Record a mapping symbol for alignment frags. We will delete this
8383 later if the alignment ends up empty. */
8384 if (!fragP->tc_frag_data.recorded)
8385 fragP->tc_frag_data.recorded = 1;
8386
8387 /* PR 21809: Do not set a mapping state for debug sections
8388 - it just confuses other tools. */
8389 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8390 return;
8391
8392 switch (fragP->fr_type)
8393 {
8394 case rs_align_test:
8395 case rs_fill:
8396 mapping_state_2 (MAP_DATA, max_chars);
8397 break;
8398 case rs_align:
8399 /* PR 20364: We can get alignment frags in code sections,
8400 so do not just assume that we should use the MAP_DATA state. */
8401 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8402 break;
8403 case rs_align_code:
8404 mapping_state_2 (MAP_INSN, max_chars);
8405 break;
8406 default:
8407 break;
8408 }
8409 }
8410
8411 /* Whether SFrame stack trace info is supported. */
8412
8413 bool
8414 aarch64_support_sframe_p (void)
8415 {
8416   /* At this time, SFrame stack trace info is supported for aarch64 LP64 only. */
8417 return (aarch64_abi == AARCH64_ABI_LP64);
8418 }
8419
8420 /* Specify if RA tracking is needed. */
8421
8422 bool
8423 aarch64_sframe_ra_tracking_p (void)
8424 {
8425 return true;
8426 }
8427
8428 /* Specify the fixed offset to recover RA from CFA.
8429 (useful only when RA tracking is not needed). */
8430
8431 offsetT
8432 aarch64_sframe_cfa_ra_offset (void)
8433 {
8434 return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
8435 }
8436
8437 /* Get the abi/arch identifier for SFrame. */
8438
8439 unsigned char
8440 aarch64_sframe_get_abi_arch (void)
8441 {
8442 unsigned char sframe_abi_arch = 0;
8443
8444 if (aarch64_support_sframe_p ())
8445 {
8446 sframe_abi_arch = target_big_endian
8447 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8448 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8449 }
8450
8451 return sframe_abi_arch;
8452 }
8453
8454 #endif /* OBJ_ELF */
8455 \f
8456 /* Initialize the DWARF-2 unwind information for this procedure. */
8457
8458 void
8459 tc_aarch64_frame_initial_instructions (void)
8460 {
8461 cfi_add_CFA_def_cfa (REG_SP, 0);
8462 }
8463
8464 /* Convert REGNAME to a DWARF-2 register number. */
8465
8466 int
8467 tc_aarch64_regname_to_dw2regnum (char *regname)
8468 {
8469 const reg_entry *reg = parse_reg (&regname);
8470 if (reg == NULL)
8471 return -1;
8472
8473 switch (reg->type)
8474 {
8475 case REG_TYPE_SP_32:
8476 case REG_TYPE_SP_64:
8477 case REG_TYPE_R_32:
8478 case REG_TYPE_R_64:
8479 return reg->number;
8480
8481 case REG_TYPE_FP_B:
8482 case REG_TYPE_FP_H:
8483 case REG_TYPE_FP_S:
8484 case REG_TYPE_FP_D:
8485 case REG_TYPE_FP_Q:
8486 return reg->number + 64;
8487
8488 default:
8489 break;
8490 }
8491 return -1;
8492 }
8493
8494 /* Implement DWARF2_ADDR_SIZE. */
8495
8496 int
8497 aarch64_dwarf2_addr_size (void)
8498 {
8499 if (ilp32_p)
8500 return 4;
8501 else if (llp64_p)
8502 return 8;
8503 return bfd_arch_bits_per_address (stdoutput) / 8;
8504 }
8505
8506 /* MD interface: Symbol and relocation handling. */
8507
8508 /* Return the address within the segment that a PC-relative fixup is
8509    relative to. For AArch64, PC-relative fixups applied to instructions
8510    are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8511
8512 long
8513 md_pcrel_from_section (fixS * fixP, segT seg)
8514 {
8515 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8516
8517 /* If this is pc-relative and we are going to emit a relocation
8518 then we just want to put out any pipeline compensation that the linker
8519 will need. Otherwise we want to use the calculated base. */
8520 if (fixP->fx_pcrel
8521 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8522 || aarch64_force_relocation (fixP)))
8523 base = 0;
8524
8525 /* AArch64 should be consistent for all pc-relative relocations. */
8526 return base + AARCH64_PCREL_OFFSET;
8527 }
8528
8529 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
8530    Otherwise we have no need to provide default values for symbols. */
8531
8532 symbolS *
8533 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8534 {
8535 #ifdef OBJ_ELF
8536 if (name[0] == '_' && name[1] == 'G'
8537 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8538 {
8539 if (!GOT_symbol)
8540 {
8541 if (symbol_find (name))
8542 as_bad (_("GOT already in the symbol table"));
8543
8544 GOT_symbol = symbol_new (name, undefined_section,
8545 &zero_address_frag, 0);
8546 }
8547
8548 return GOT_symbol;
8549 }
8550 #endif
8551
8552 return 0;
8553 }
8554
8555 /* Return non-zero if the indicated VALUE has overflowed the maximum
8556    range expressible by an unsigned number with the indicated number of
8557    BITS. */
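/* For example, with BITS == 16 the values 0 .. 0xffff are in range and
   0x10000 overflows.  */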
8558
8559 static bool
8560 unsigned_overflow (valueT value, unsigned bits)
8561 {
8562 valueT lim;
8563 if (bits >= sizeof (valueT) * 8)
8564 return false;
8565 lim = (valueT) 1 << bits;
8566 return (value >= lim);
8567 }
8568
8569
8570 /* Return non-zero if the indicated VALUE has overflowed the maximum
8571    range expressible by a signed number with the indicated number of
8572    BITS. */
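/* For example, with BITS == 16 the representable range is -0x8000 .. 0x7fff.  */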
8573
8574 static bool
8575 signed_overflow (offsetT value, unsigned bits)
8576 {
8577 offsetT lim;
8578 if (bits >= sizeof (offsetT) * 8)
8579 return false;
8580 lim = (offsetT) 1 << (bits - 1);
8581 return (value < -lim || value >= lim);
8582 }
8583
8584 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8585 unsigned immediate offset load/store instruction, try to encode it as
8586 an unscaled, 9-bit, signed immediate offset load/store instruction.
8587 Return TRUE if it is successful; otherwise return FALSE.
8588
8589    As a programmer-friendly assembler, we generate LDUR/STUR instructions
8590    in response to the standard LDR/STR mnemonics when the immediate offset is
8591    unambiguous, i.e. when it is negative or unaligned. */
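/* For example, "ldr x0, [x1, #-8]" cannot use the scaled, unsigned 12-bit
   offset form because the offset is negative, so it is assembled as
   "ldur x0, [x1, #-8]"; likewise "ldr w0, [x1, #2]", whose offset is not a
   multiple of the 4-byte transfer size, becomes "ldur w0, [x1, #2]".  */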
8592
8593 static bool
8594 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
8595 {
8596 int idx;
8597 enum aarch64_op new_op;
8598 const aarch64_opcode *new_opcode;
8599
8600 gas_assert (instr->opcode->iclass == ldst_pos);
8601
8602 switch (instr->opcode->op)
8603 {
8604     case OP_LDRB_POS: new_op = OP_LDURB; break;
8605 case OP_STRB_POS: new_op = OP_STURB; break;
8606 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
8607 case OP_LDRH_POS: new_op = OP_LDURH; break;
8608 case OP_STRH_POS: new_op = OP_STURH; break;
8609 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
8610 case OP_LDR_POS: new_op = OP_LDUR; break;
8611 case OP_STR_POS: new_op = OP_STUR; break;
8612 case OP_LDRF_POS: new_op = OP_LDURV; break;
8613 case OP_STRF_POS: new_op = OP_STURV; break;
8614 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
8615 case OP_PRFM_POS: new_op = OP_PRFUM; break;
8616 default: new_op = OP_NIL; break;
8617 }
8618
8619 if (new_op == OP_NIL)
8620 return false;
8621
8622 new_opcode = aarch64_get_opcode (new_op);
8623 gas_assert (new_opcode != NULL);
8624
8625 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
8626 instr->opcode->op, new_opcode->op);
8627
8628 aarch64_replace_opcode (instr, new_opcode);
8629
8630   /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
8631      qualifier matching may fail because the out-of-date qualifier will
8632      prevent the operand from being updated with a new and correct qualifier. */
8633 idx = aarch64_operand_index (instr->opcode->operands,
8634 AARCH64_OPND_ADDR_SIMM9);
8635 gas_assert (idx == 1);
8636 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
8637
8638 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
8639
8640 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
8641 insn_sequence))
8642 return false;
8643
8644 return true;
8645 }
8646
8647 /* Called by fix_insn to fix a MOV immediate alias instruction.
8648
8649    Operand for a generic move immediate instruction, which is an alias
8650    instruction that generates a single MOVZ, MOVN or ORR instruction to load
8651    a 32-bit/64-bit immediate value into a general register. An assembler error
8652    shall result if the immediate cannot be created by a single one of these
8653    instructions. If there is a choice, then to ensure reversibility an
8654    assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
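/* For example, "mov x0, #0x2a" resolves to a MOVZ, "mov x0, #-2" to
   "movn x0, #1", and "mov x0, #0x5555555555555555" to an ORR with a bitmask
   immediate, while "mov x0, #0x12345678" is rejected because no single one
   of the three instructions can materialise that value.  */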
8655
8656 static void
8657 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8658 {
8659 const aarch64_opcode *opcode;
8660
8661 /* Need to check if the destination is SP/ZR. The check has to be done
8662 before any aarch64_replace_opcode. */
8663 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8664 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8665
8666 instr->operands[1].imm.value = value;
8667 instr->operands[1].skip = 0;
8668
8669 if (try_mov_wide_p)
8670 {
8671 /* Try the MOVZ alias. */
8672 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8673 aarch64_replace_opcode (instr, opcode);
8674 if (aarch64_opcode_encode (instr->opcode, instr,
8675 &instr->value, NULL, NULL, insn_sequence))
8676 {
8677 put_aarch64_insn (buf, instr->value);
8678 return;
8679 }
8680 /* Try the MOVK alias. */
8681 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8682 aarch64_replace_opcode (instr, opcode);
8683 if (aarch64_opcode_encode (instr->opcode, instr,
8684 &instr->value, NULL, NULL, insn_sequence))
8685 {
8686 put_aarch64_insn (buf, instr->value);
8687 return;
8688 }
8689 }
8690
8691 if (try_mov_bitmask_p)
8692 {
8693 /* Try the ORR alias. */
8694 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8695 aarch64_replace_opcode (instr, opcode);
8696 if (aarch64_opcode_encode (instr->opcode, instr,
8697 &instr->value, NULL, NULL, insn_sequence))
8698 {
8699 put_aarch64_insn (buf, instr->value);
8700 return;
8701 }
8702 }
8703
8704 as_bad_where (fixP->fx_file, fixP->fx_line,
8705 _("immediate cannot be moved by a single instruction"));
8706 }
8707
8708 /* An immediate-related instruction operand may have a symbol used
8709    in the assembly, e.g.
8710
8711 mov w0, u32
8712 .set u32, 0x00ffff00
8713
8714    At the time when the assembly instruction is parsed, a referenced symbol,
8715    like 'u32' in the above example, may not have been seen; a fixS is created
8716    in such a case and is handled here after symbols have been resolved.
8717    The instruction is fixed up with VALUE using the information in *FIXP plus
8718 extra information in FLAGS.
8719
8720 This function is called by md_apply_fix to fix up instructions that need
8721 a fix-up described above but does not involve any linker-time relocation. */
8722
8723 static void
8724 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
8725 {
8726 int idx;
8727 uint32_t insn;
8728 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8729 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
8730 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
8731
8732 if (new_inst)
8733 {
8734 /* Now the instruction is about to be fixed-up, so the operand that
8735 was previously marked as 'ignored' needs to be unmarked in order
8736 to get the encoding done properly. */
8737 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8738 new_inst->operands[idx].skip = 0;
8739 }
8740
8741 gas_assert (opnd != AARCH64_OPND_NIL);
8742
8743 switch (opnd)
8744 {
8745 case AARCH64_OPND_EXCEPTION:
8746 case AARCH64_OPND_UNDEFINED:
8747 if (unsigned_overflow (value, 16))
8748 as_bad_where (fixP->fx_file, fixP->fx_line,
8749 _("immediate out of range"));
8750 insn = get_aarch64_insn (buf);
8751 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
8752 put_aarch64_insn (buf, insn);
8753 break;
8754
8755 case AARCH64_OPND_AIMM:
8756 /* ADD or SUB with immediate.
8757 NOTE this assumes we come here with a add/sub shifted reg encoding
8758 3 322|2222|2 2 2 21111 111111
8759 1 098|7654|3 2 1 09876 543210 98765 43210
8760 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
8761 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
8762 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
8763 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
8764 ->
8765 3 322|2222|2 2 221111111111
8766 1 098|7654|3 2 109876543210 98765 43210
8767 11000000 sf 001|0001|shift imm12 Rn Rd ADD
8768 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
8769 51000000 sf 101|0001|shift imm12 Rn Rd SUB
8770 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
8771 Fields sf Rn Rd are already set. */
8772 insn = get_aarch64_insn (buf);
8773 if (value < 0)
8774 {
8775 /* Add <-> sub. */
8776 insn = reencode_addsub_switch_add_sub (insn);
8777 value = -value;
8778 }
8779
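      /* E.g. an addend of 0x5000 overflows the 12-bit immediate field, but
	 its low 12 bits are zero, so it can be encoded as #0x5, LSL #12.  */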
8780 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
8781 && unsigned_overflow (value, 12))
8782 {
8783 /* Try to shift the value by 12 to make it fit. */
8784 if (((value >> 12) << 12) == value
8785 && ! unsigned_overflow (value, 12 + 12))
8786 {
8787 value >>= 12;
8788 insn |= encode_addsub_imm_shift_amount (1);
8789 }
8790 }
8791
8792 if (unsigned_overflow (value, 12))
8793 as_bad_where (fixP->fx_file, fixP->fx_line,
8794 _("immediate out of range"));
8795
8796 insn |= encode_addsub_imm (value);
8797
8798 put_aarch64_insn (buf, insn);
8799 break;
8800
8801 case AARCH64_OPND_SIMD_IMM:
8802 case AARCH64_OPND_SIMD_IMM_SFT:
8803 case AARCH64_OPND_LIMM:
8804 /* Bit mask immediate. */
8805 gas_assert (new_inst != NULL);
8806 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8807 new_inst->operands[idx].imm.value = value;
8808 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8809 &new_inst->value, NULL, NULL, insn_sequence))
8810 put_aarch64_insn (buf, new_inst->value);
8811 else
8812 as_bad_where (fixP->fx_file, fixP->fx_line,
8813 _("invalid immediate"));
8814 break;
8815
8816 case AARCH64_OPND_HALF:
8817 /* 16-bit unsigned immediate. */
8818 if (unsigned_overflow (value, 16))
8819 as_bad_where (fixP->fx_file, fixP->fx_line,
8820 _("immediate out of range"));
8821 insn = get_aarch64_insn (buf);
8822 insn |= encode_movw_imm (value & 0xffff);
8823 put_aarch64_insn (buf, insn);
8824 break;
8825
8826 case AARCH64_OPND_IMM_MOV:
8827       /* Operand for a generic move immediate instruction, which is
8828          an alias instruction that generates a single MOVZ, MOVN or ORR
8829          instruction to load a 32-bit/64-bit immediate value into a general
8830          register. An assembler error shall result if the immediate cannot be
8831          created by a single one of these instructions. If there is a choice,
8832          then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
8833          and MOVZ or MOVN to ORR. */
8834 gas_assert (new_inst != NULL);
8835 fix_mov_imm_insn (fixP, buf, new_inst, value);
8836 break;
8837
8838 case AARCH64_OPND_ADDR_SIMM7:
8839 case AARCH64_OPND_ADDR_SIMM9:
8840 case AARCH64_OPND_ADDR_SIMM9_2:
8841 case AARCH64_OPND_ADDR_SIMM10:
8842 case AARCH64_OPND_ADDR_UIMM12:
8843 case AARCH64_OPND_ADDR_SIMM11:
8844 case AARCH64_OPND_ADDR_SIMM13:
8845 /* Immediate offset in an address. */
8846 insn = get_aarch64_insn (buf);
8847
8848 gas_assert (new_inst != NULL && new_inst->value == insn);
8849 gas_assert (new_inst->opcode->operands[1] == opnd
8850 || new_inst->opcode->operands[2] == opnd);
8851
8852 /* Get the index of the address operand. */
8853 if (new_inst->opcode->operands[1] == opnd)
8854 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
8855 idx = 1;
8856 else
8857 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
8858 idx = 2;
8859
8860 /* Update the resolved offset value. */
8861 new_inst->operands[idx].addr.offset.imm = value;
8862
8863 /* Encode/fix-up. */
8864 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8865 &new_inst->value, NULL, NULL, insn_sequence))
8866 {
8867 put_aarch64_insn (buf, new_inst->value);
8868 break;
8869 }
8870 else if (new_inst->opcode->iclass == ldst_pos
8871 && try_to_encode_as_unscaled_ldst (new_inst))
8872 {
8873 put_aarch64_insn (buf, new_inst->value);
8874 break;
8875 }
8876
8877 as_bad_where (fixP->fx_file, fixP->fx_line,
8878 _("immediate offset out of range"));
8879 break;
8880
8881 default:
8882 gas_assert (0);
8883 as_fatal (_("unhandled operand code %d"), opnd);
8884 }
8885 }
8886
8887 /* Apply a fixup (fixP) to segment data, once it has been determined
8888 by our caller that we have all the info we need to fix it up.
8889
8890 Parameter valP is the pointer to the value of the bits. */
8891
8892 void
8893 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8894 {
8895 offsetT value = *valP;
8896 uint32_t insn;
8897 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8898 int scale;
8899 unsigned flags = fixP->fx_addnumber;
8900
8901 DEBUG_TRACE ("\n\n");
8902 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8903 DEBUG_TRACE ("Enter md_apply_fix");
8904
8905 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8906
8907 /* Note whether this will delete the relocation. */
8908
8909 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
8910 && aarch64_force_reloc (fixP->fx_r_type) <= 0)
8911 fixP->fx_done = 1;
8912
8913 /* Process the relocations. */
8914 switch (fixP->fx_r_type)
8915 {
8916 case BFD_RELOC_NONE:
8917 /* This will need to go in the object file. */
8918 fixP->fx_done = 0;
8919 break;
8920
8921 case BFD_RELOC_8:
8922 case BFD_RELOC_8_PCREL:
8923 if (fixP->fx_done || !seg->use_rela_p)
8924 md_number_to_chars (buf, value, 1);
8925 break;
8926
8927 case BFD_RELOC_16:
8928 case BFD_RELOC_16_PCREL:
8929 if (fixP->fx_done || !seg->use_rela_p)
8930 md_number_to_chars (buf, value, 2);
8931 break;
8932
8933 case BFD_RELOC_32:
8934 case BFD_RELOC_32_PCREL:
8935 if (fixP->fx_done || !seg->use_rela_p)
8936 md_number_to_chars (buf, value, 4);
8937 break;
8938
8939 case BFD_RELOC_64:
8940 case BFD_RELOC_64_PCREL:
8941 if (fixP->fx_done || !seg->use_rela_p)
8942 md_number_to_chars (buf, value, 8);
8943 break;
8944
8945 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8946 /* We claim that these fixups have been processed here, even if
8947 in fact we generate an error because we do not have a reloc
8948 for them, so tc_gen_reloc() will reject them. */
8949 fixP->fx_done = 1;
8950 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8951 {
8952 as_bad_where (fixP->fx_file, fixP->fx_line,
8953 _("undefined symbol %s used as an immediate value"),
8954 S_GET_NAME (fixP->fx_addsy));
8955 goto apply_fix_return;
8956 }
8957 fix_insn (fixP, flags, value);
8958 break;
8959
8960 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8961 if (fixP->fx_done || !seg->use_rela_p)
8962 {
8963 if (value & 3)
8964 as_bad_where (fixP->fx_file, fixP->fx_line,
8965 _("pc-relative load offset not word aligned"));
8966 if (signed_overflow (value, 21))
8967 as_bad_where (fixP->fx_file, fixP->fx_line,
8968 _("pc-relative load offset out of range"));
8969 insn = get_aarch64_insn (buf);
8970 insn |= encode_ld_lit_ofs_19 (value >> 2);
8971 put_aarch64_insn (buf, insn);
8972 }
8973 break;
8974
8975 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8976 if (fixP->fx_done || !seg->use_rela_p)
8977 {
8978 if (signed_overflow (value, 21))
8979 as_bad_where (fixP->fx_file, fixP->fx_line,
8980 _("pc-relative address offset out of range"));
8981 insn = get_aarch64_insn (buf);
8982 insn |= encode_adr_imm (value);
8983 put_aarch64_insn (buf, insn);
8984 }
8985 break;
8986
8987 case BFD_RELOC_AARCH64_BRANCH19:
8988 if (fixP->fx_done || !seg->use_rela_p)
8989 {
8990 if (value & 3)
8991 as_bad_where (fixP->fx_file, fixP->fx_line,
8992 _("conditional branch target not word aligned"));
8993 if (signed_overflow (value, 21))
8994 as_bad_where (fixP->fx_file, fixP->fx_line,
8995 _("conditional branch out of range"));
8996 insn = get_aarch64_insn (buf);
8997 insn |= encode_cond_branch_ofs_19 (value >> 2);
8998 put_aarch64_insn (buf, insn);
8999 }
9000 break;
9001
9002 case BFD_RELOC_AARCH64_TSTBR14:
9003 if (fixP->fx_done || !seg->use_rela_p)
9004 {
9005 if (value & 3)
9006 as_bad_where (fixP->fx_file, fixP->fx_line,
9007 _("conditional branch target not word aligned"));
9008 if (signed_overflow (value, 16))
9009 as_bad_where (fixP->fx_file, fixP->fx_line,
9010 _("conditional branch out of range"));
9011 insn = get_aarch64_insn (buf);
9012 insn |= encode_tst_branch_ofs_14 (value >> 2);
9013 put_aarch64_insn (buf, insn);
9014 }
9015 break;
9016
9017 case BFD_RELOC_AARCH64_CALL26:
9018 case BFD_RELOC_AARCH64_JUMP26:
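      /* B and BL have a 26-bit word offset, giving a byte range of
	 +/-128MiB.  */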
9019 if (fixP->fx_done || !seg->use_rela_p)
9020 {
9021 if (value & 3)
9022 as_bad_where (fixP->fx_file, fixP->fx_line,
9023 _("branch target not word aligned"));
9024 if (signed_overflow (value, 28))
9025 as_bad_where (fixP->fx_file, fixP->fx_line,
9026 _("branch out of range"));
9027 insn = get_aarch64_insn (buf);
9028 insn |= encode_branch_ofs_26 (value >> 2);
9029 put_aarch64_insn (buf, insn);
9030 }
9031 break;
9032
9033 case BFD_RELOC_AARCH64_MOVW_G0:
9034 case BFD_RELOC_AARCH64_MOVW_G0_NC:
9035 case BFD_RELOC_AARCH64_MOVW_G0_S:
9036 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9037 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9038 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
9039 scale = 0;
9040 goto movw_common;
9041 case BFD_RELOC_AARCH64_MOVW_G1:
9042 case BFD_RELOC_AARCH64_MOVW_G1_NC:
9043 case BFD_RELOC_AARCH64_MOVW_G1_S:
9044 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9045 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9046 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
9047 scale = 16;
9048 goto movw_common;
9049 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9050 scale = 0;
9051 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9052 /* Should always be exported to object file, see
9053 aarch64_force_relocation(). */
9054 gas_assert (!fixP->fx_done);
9055 gas_assert (seg->use_rela_p);
9056 goto movw_common;
9057 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9058 scale = 16;
9059 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9060 /* Should always be exported to object file, see
9061 aarch64_force_relocation(). */
9062 gas_assert (!fixP->fx_done);
9063 gas_assert (seg->use_rela_p);
9064 goto movw_common;
9065 case BFD_RELOC_AARCH64_MOVW_G2:
9066 case BFD_RELOC_AARCH64_MOVW_G2_NC:
9067 case BFD_RELOC_AARCH64_MOVW_G2_S:
9068 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9069 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
9070 scale = 32;
9071 goto movw_common;
9072 case BFD_RELOC_AARCH64_MOVW_G3:
9073 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9074 scale = 48;
9075 movw_common:
9076 if (fixP->fx_done || !seg->use_rela_p)
9077 {
9078 insn = get_aarch64_insn (buf);
9079
9080 if (!fixP->fx_done)
9081 {
9082 /* REL signed addend must fit in 16 bits */
9083 if (signed_overflow (value, 16))
9084 as_bad_where (fixP->fx_file, fixP->fx_line,
9085 _("offset out of range"));
9086 }
9087 else
9088 {
9089 /* Check for overflow and scale. */
9090 switch (fixP->fx_r_type)
9091 {
9092 case BFD_RELOC_AARCH64_MOVW_G0:
9093 case BFD_RELOC_AARCH64_MOVW_G1:
9094 case BFD_RELOC_AARCH64_MOVW_G2:
9095 case BFD_RELOC_AARCH64_MOVW_G3:
9096 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9097 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9098 if (unsigned_overflow (value, scale + 16))
9099 as_bad_where (fixP->fx_file, fixP->fx_line,
9100 _("unsigned value out of range"));
9101 break;
9102 case BFD_RELOC_AARCH64_MOVW_G0_S:
9103 case BFD_RELOC_AARCH64_MOVW_G1_S:
9104 case BFD_RELOC_AARCH64_MOVW_G2_S:
9105 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9106 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9107 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9108 /* NOTE: We can only come here with movz or movn. */
9109 if (signed_overflow (value, scale + 16))
9110 as_bad_where (fixP->fx_file, fixP->fx_line,
9111 _("signed value out of range"));
9112 if (value < 0)
9113 {
9114 /* Force use of MOVN. */
9115 value = ~value;
9116 insn = reencode_movzn_to_movn (insn);
9117 }
9118 else
9119 {
9120 /* Force use of MOVZ. */
9121 insn = reencode_movzn_to_movz (insn);
9122 }
9123 break;
9124 default:
9125 /* Unchecked relocations. */
9126 break;
9127 }
9128 value >>= scale;
9129 }
9130
9131 /* Insert value into MOVN/MOVZ/MOVK instruction. */
9132 insn |= encode_movw_imm (value & 0xffff);
9133
9134 put_aarch64_insn (buf, insn);
9135 }
9136 break;
9137
9138 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9139 fixP->fx_r_type = (ilp32_p
9140 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9141 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9142 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9143 /* Should always be exported to object file, see
9144 aarch64_force_relocation(). */
9145 gas_assert (!fixP->fx_done);
9146 gas_assert (seg->use_rela_p);
9147 break;
9148
9149 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9150 fixP->fx_r_type = (ilp32_p
9151 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9152 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9153 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9154 /* Should always be exported to object file, see
9155 aarch64_force_relocation(). */
9156 gas_assert (!fixP->fx_done);
9157 gas_assert (seg->use_rela_p);
9158 break;
9159
9160 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9161 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9162 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9163 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9164 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9165 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9166 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9167 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9168 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9169 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9170 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9171 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9172 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9173 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9174 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9175 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9176 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9177 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9178 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9179 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9180 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9181 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9182 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9183 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9184 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9185 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9186 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9187 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9188 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9189 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9190 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9191 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9192 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9193 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9194 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9195 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9196 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9197 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9198 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9199 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9200 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9201 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9202 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9203 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9204 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9205 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9206 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9207 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9208 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9209 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9210 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9211 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9212 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9213 /* Should always be exported to object file, see
9214 aarch64_force_relocation(). */
9215 gas_assert (!fixP->fx_done);
9216 gas_assert (seg->use_rela_p);
9217 break;
9218
9219 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9220 /* Should always be exported to object file, see
9221 aarch64_force_relocation(). */
9222 fixP->fx_r_type = (ilp32_p
9223 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9224 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9225 gas_assert (!fixP->fx_done);
9226 gas_assert (seg->use_rela_p);
9227 break;
9228
9229 case BFD_RELOC_AARCH64_ADD_LO12:
9230 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9231 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9232 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9233 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9234 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9235 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9236 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9237 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9238 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9239 case BFD_RELOC_AARCH64_LDST128_LO12:
9240 case BFD_RELOC_AARCH64_LDST16_LO12:
9241 case BFD_RELOC_AARCH64_LDST32_LO12:
9242 case BFD_RELOC_AARCH64_LDST64_LO12:
9243 case BFD_RELOC_AARCH64_LDST8_LO12:
9244 /* Should always be exported to object file, see
9245 aarch64_force_relocation(). */
9246 gas_assert (!fixP->fx_done);
9247 gas_assert (seg->use_rela_p);
9248 break;
9249
9250 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9251 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9252 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9253 break;
9254
9255 case BFD_RELOC_UNUSED:
9256 /* An error will already have been reported. */
9257 break;
9258
9259 case BFD_RELOC_RVA:
9260 case BFD_RELOC_32_SECREL:
9261 case BFD_RELOC_16_SECIDX:
9262 break;
9263
9264 default:
9265 as_bad_where (fixP->fx_file, fixP->fx_line,
9266 _("unexpected %s fixup"),
9267 bfd_get_reloc_code_name (fixP->fx_r_type));
9268 break;
9269 }
9270
9271 apply_fix_return:
9272   /* Free the allocated struct aarch64_inst.
9273      N.B. currently only a very limited number of fix-up types actually use
9274      this field, so the impact on performance should be minimal. */
9275 free (fixP->tc_fix_data.inst);
9276
9277 return;
9278 }
9279
9280 /* Translate internal representation of relocation info to BFD target
9281 format. */
9282
9283 arelent *
9284 tc_gen_reloc (asection * section, fixS * fixp)
9285 {
9286 arelent *reloc;
9287 bfd_reloc_code_real_type code;
9288
9289 reloc = XNEW (arelent);
9290
9291 reloc->sym_ptr_ptr = XNEW (asymbol *);
9292 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9293 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9294
9295 if (fixp->fx_pcrel)
9296 {
9297 if (section->use_rela_p)
9298 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9299 else
9300 fixp->fx_offset = reloc->address;
9301 }
9302 reloc->addend = fixp->fx_offset;
9303
9304 code = fixp->fx_r_type;
9305 switch (code)
9306 {
9307 case BFD_RELOC_16:
9308 if (fixp->fx_pcrel)
9309 code = BFD_RELOC_16_PCREL;
9310 break;
9311
9312 case BFD_RELOC_32:
9313 if (fixp->fx_pcrel)
9314 code = BFD_RELOC_32_PCREL;
9315 break;
9316
9317 case BFD_RELOC_64:
9318 if (fixp->fx_pcrel)
9319 code = BFD_RELOC_64_PCREL;
9320 break;
9321
9322 default:
9323 break;
9324 }
9325
9326 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9327 if (reloc->howto == NULL)
9328 {
9329 as_bad_where (fixp->fx_file, fixp->fx_line,
9330 _
9331 ("cannot represent %s relocation in this object file format"),
9332 bfd_get_reloc_code_name (code));
9333 return NULL;
9334 }
9335
9336 return reloc;
9337 }
9338
9339 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9340
9341 void
9342 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9343 {
9344 bfd_reloc_code_real_type type;
9345 int pcrel = 0;
9346
9347 #ifdef TE_PE
9348 if (exp->X_op == O_secrel)
9349 {
9350 exp->X_op = O_symbol;
9351 type = BFD_RELOC_32_SECREL;
9352 }
9353 else if (exp->X_op == O_secidx)
9354 {
9355 exp->X_op = O_symbol;
9356 type = BFD_RELOC_16_SECIDX;
9357 }
9358 else
9359 {
9360 #endif
9361 /* Pick a reloc.
9362 FIXME: @@ Should look at CPU word size. */
9363 switch (size)
9364 {
9365 case 1:
9366 type = BFD_RELOC_8;
9367 break;
9368 case 2:
9369 type = BFD_RELOC_16;
9370 break;
9371 case 4:
9372 type = BFD_RELOC_32;
9373 break;
9374 case 8:
9375 type = BFD_RELOC_64;
9376 break;
9377 default:
9378 as_bad (_("cannot do %u-byte relocation"), size);
9379 type = BFD_RELOC_UNUSED;
9380 break;
9381 }
9382 #ifdef TE_PE
9383 }
9384 #endif
9385
9386 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9387 }
9388
9389 /* Implement md_after_parse_args. This is the earliest time we need to decide
9390    the ABI. If no -mabi is specified, the ABI is decided by the target triplet. */
9391
9392 void
9393 aarch64_after_parse_args (void)
9394 {
9395 if (aarch64_abi != AARCH64_ABI_NONE)
9396 return;
9397
9398 #ifdef OBJ_ELF
9399 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9400 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9401 aarch64_abi = AARCH64_ABI_ILP32;
9402 else
9403 aarch64_abi = AARCH64_ABI_LP64;
9404 #else
9405 aarch64_abi = AARCH64_ABI_LLP64;
9406 #endif
9407 }
9408
9409 #ifdef OBJ_ELF
9410 const char *
9411 elf64_aarch64_target_format (void)
9412 {
9413 #ifdef TE_CLOUDABI
9414 /* FIXME: What to do for ilp32_p ? */
9415 if (target_big_endian)
9416 return "elf64-bigaarch64-cloudabi";
9417 else
9418 return "elf64-littleaarch64-cloudabi";
9419 #else
9420 if (target_big_endian)
9421 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9422 else
9423 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9424 #endif
9425 }
9426
9427 void
9428 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
9429 {
9430 elf_frob_symbol (symp, puntp);
9431 }
9432 #elif defined OBJ_COFF
9433 const char *
9434 coff_aarch64_target_format (void)
9435 {
9436 return "pe-aarch64-little";
9437 }
9438 #endif
9439
9440 /* MD interface: Finalization. */
9441
9442 /* A good place to do this, although this was probably not intended
9443 for this kind of use. We need to dump the literal pool before
9444 references are made to a null symbol pointer. */
9445
9446 void
9447 aarch64_cleanup (void)
9448 {
9449 literal_pool *pool;
9450
9451 for (pool = list_of_pools; pool; pool = pool->next)
9452 {
9453 /* Put it at the end of the relevant section. */
9454 subseg_set (pool->section, pool->sub_section);
9455 s_ltorg (0);
9456 }
9457 }
9458
9459 #ifdef OBJ_ELF
9460 /* Remove any excess mapping symbols generated for alignment frags in
9461 SEC. We may have created a mapping symbol before a zero byte
9462 alignment; remove it if there's a mapping symbol after the
9463 alignment. */
9464 static void
9465 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
9466 void *dummy ATTRIBUTE_UNUSED)
9467 {
9468 segment_info_type *seginfo = seg_info (sec);
9469 fragS *fragp;
9470
9471 if (seginfo == NULL || seginfo->frchainP == NULL)
9472 return;
9473
9474 for (fragp = seginfo->frchainP->frch_root;
9475 fragp != NULL; fragp = fragp->fr_next)
9476 {
9477 symbolS *sym = fragp->tc_frag_data.last_map;
9478 fragS *next = fragp->fr_next;
9479
9480 /* Variable-sized frags have been converted to fixed size by
9481 this point. But if this was variable-sized to start with,
9482 there will be a fixed-size frag after it. So don't handle
9483 next == NULL. */
9484 if (sym == NULL || next == NULL)
9485 continue;
9486
9487 if (S_GET_VALUE (sym) < next->fr_address)
9488 /* Not at the end of this frag. */
9489 continue;
9490 know (S_GET_VALUE (sym) == next->fr_address);
9491
9492 do
9493 {
9494 if (next->tc_frag_data.first_map != NULL)
9495 {
9496 /* Next frag starts with a mapping symbol. Discard this
9497 one. */
9498 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9499 break;
9500 }
9501
9502 if (next->fr_next == NULL)
9503 {
9504 /* This mapping symbol is at the end of the section. Discard
9505 it. */
9506 know (next->fr_fix == 0 && next->fr_var == 0);
9507 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9508 break;
9509 }
9510
9511 /* As long as we have empty frags without any mapping symbols,
9512 keep looking. */
9513 /* If the next frag is non-empty and does not start with a
9514 mapping symbol, then this mapping symbol is required. */
9515 if (next->fr_address != next->fr_next->fr_address)
9516 break;
9517
9518 next = next->fr_next;
9519 }
9520 while (next != NULL);
9521 }
9522 }
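
/* For example, a mapping symbol (e.g. "$d") emitted just before an alignment
that turns out to need zero bytes of padding is redundant if the frag after
the alignment begins with its own mapping symbol; the loop above walks any
empty frags in between and removes the earlier symbol in that case. */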
9523 #endif
9524
9525 /* Adjust the symbol table. */
9526
9527 void
9528 aarch64_adjust_symtab (void)
9529 {
9530 #ifdef OBJ_ELF
9531 /* Remove any overlapping mapping symbols generated by alignment frags. */
9532 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
9533 /* Now do generic ELF adjustments. */
9534 elf_adjust_symtab ();
9535 #endif
9536 }
9537
9538 static void
9539 checked_hash_insert (htab_t table, const char *key, void *value)
9540 {
9541 str_hash_insert (table, key, value, 0);
9542 }
9543
9544 static void
9545 sysreg_hash_insert (htab_t table, const char *key, void *value)
9546 {
9547 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
9548 checked_hash_insert (table, key, value);
9549 }
9550
9551 static void
9552 fill_instruction_hash_table (void)
9553 {
9554 const aarch64_opcode *opcode = aarch64_opcode_table;
9555
9556 while (opcode->name != NULL)
9557 {
9558 templates *templ, *new_templ;
9559 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9560
9561 new_templ = XNEW (templates);
9562 new_templ->opcode = opcode;
9563 new_templ->next = NULL;
9564
9565 if (!templ)
9566 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9567 else
9568 {
9569 new_templ->next = templ->next;
9570 templ->next = new_templ;
9571 }
9572 ++opcode;
9573 }
9574 }
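
/* For example, every aarch64_opcode_table entry whose mnemonic is "add"
(register, immediate, SIMD, etc.) ends up on the single "templates" chain
that a str_hash_find (aarch64_ops_hsh, "add") lookup returns; instruction
parsing then tries each opcode on the chain until one matches the operands. */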
9575
9576 static inline void
9577 convert_to_upper (char *dst, const char *src, size_t num)
9578 {
9579 unsigned int i;
9580 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9581 *dst = TOUPPER (*src);
9582 *dst = '\0';
9583 }
9584
9585 /* Assume STR points to a lower-case string; allocate, convert and return
9586 the corresponding upper-case string. */
9587 static inline const char*
9588 get_upper_str (const char *str)
9589 {
9590 char *ret;
9591 size_t len = strlen (str);
9592 ret = XNEWVEC (char, len + 1);
9593 convert_to_upper (ret, str, len);
9594 return ret;
9595 }
9596
9597 /* MD interface: Initialization. */
9598
9599 void
9600 md_begin (void)
9601 {
9602 unsigned mach;
9603 unsigned int i;
9604
9605 aarch64_ops_hsh = str_htab_create ();
9606 aarch64_cond_hsh = str_htab_create ();
9607 aarch64_shift_hsh = str_htab_create ();
9608 aarch64_sys_regs_hsh = str_htab_create ();
9609 aarch64_pstatefield_hsh = str_htab_create ();
9610 aarch64_sys_regs_ic_hsh = str_htab_create ();
9611 aarch64_sys_regs_dc_hsh = str_htab_create ();
9612 aarch64_sys_regs_at_hsh = str_htab_create ();
9613 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9614 aarch64_sys_regs_sr_hsh = str_htab_create ();
9615 aarch64_reg_hsh = str_htab_create ();
9616 aarch64_barrier_opt_hsh = str_htab_create ();
9617 aarch64_nzcv_hsh = str_htab_create ();
9618 aarch64_pldop_hsh = str_htab_create ();
9619 aarch64_hint_opt_hsh = str_htab_create ();
9620
9621 fill_instruction_hash_table ();
9622
9623 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9624 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9625 (void *) (aarch64_sys_regs + i));
9626
9627 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9628 sysreg_hash_insert (aarch64_pstatefield_hsh,
9629 aarch64_pstatefields[i].name,
9630 (void *) (aarch64_pstatefields + i));
9631
9632 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9633 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9634 aarch64_sys_regs_ic[i].name,
9635 (void *) (aarch64_sys_regs_ic + i));
9636
9637 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9638 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9639 aarch64_sys_regs_dc[i].name,
9640 (void *) (aarch64_sys_regs_dc + i));
9641
9642 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9643 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9644 aarch64_sys_regs_at[i].name,
9645 (void *) (aarch64_sys_regs_at + i));
9646
9647 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9648 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9649 aarch64_sys_regs_tlbi[i].name,
9650 (void *) (aarch64_sys_regs_tlbi + i));
9651
9652 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9653 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9654 aarch64_sys_regs_sr[i].name,
9655 (void *) (aarch64_sys_regs_sr + i));
9656
9657 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9658 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9659 (void *) (reg_names + i));
9660
9661 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9662 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9663 (void *) (nzcv_names + i));
9664
9665 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9666 {
9667 const char *name = aarch64_operand_modifiers[i].name;
9668 checked_hash_insert (aarch64_shift_hsh, name,
9669 (void *) (aarch64_operand_modifiers + i));
9670 /* Also hash the name in the upper case. */
9671 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9672 (void *) (aarch64_operand_modifiers + i));
9673 }
9674
9675 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9676 {
9677 unsigned int j;
9678 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9679 the same condition code. */
9680 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9681 {
9682 const char *name = aarch64_conds[i].names[j];
9683 if (name == NULL)
9684 break;
9685 checked_hash_insert (aarch64_cond_hsh, name,
9686 (void *) (aarch64_conds + i));
9687 /* Also hash the name in the upper case. */
9688 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9689 (void *) (aarch64_conds + i));
9690 }
9691 }
9692
9693 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9694 {
9695 const char *name = aarch64_barrier_options[i].name;
9696 /* Skip xx00 - the unallocated option values. */
9697 if ((i & 0x3) == 0)
9698 continue;
9699 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9700 (void *) (aarch64_barrier_options + i));
9701 /* Also hash the name in the upper case. */
9702 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9703 (void *) (aarch64_barrier_options + i));
9704 }
9705
9706 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
9707 {
9708 const char *name = aarch64_barrier_dsb_nxs_options[i].name;
9709 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9710 (void *) (aarch64_barrier_dsb_nxs_options + i));
9711 /* Also hash the name in the upper case. */
9712 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9713 (void *) (aarch64_barrier_dsb_nxs_options + i));
9714 }
9715
9716 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9717 {
9718 const char* name = aarch64_prfops[i].name;
9719 /* Skip the unallocated hint encodings. */
9720 if (name == NULL)
9721 continue;
9722 checked_hash_insert (aarch64_pldop_hsh, name,
9723 (void *) (aarch64_prfops + i));
9724 /* Also hash the name in the upper case. */
9725 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9726 (void *) (aarch64_prfops + i));
9727 }
9728
9729 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9730 {
9731 const char* name = aarch64_hint_options[i].name;
9732 const char* upper_name = get_upper_str (name);
9733
9734 checked_hash_insert (aarch64_hint_opt_hsh, name,
9735 (void *) (aarch64_hint_options + i));
9736
9737 /* Also hash the name in the upper case if not the same. */
9738 if (strcmp (name, upper_name) != 0)
9739 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9740 (void *) (aarch64_hint_options + i));
9741 }
9742
9743 /* Set the cpu variant based on the command-line options. */
9744 if (!mcpu_cpu_opt)
9745 mcpu_cpu_opt = march_cpu_opt;
9746
9747 if (!mcpu_cpu_opt)
9748 mcpu_cpu_opt = &cpu_default;
9749
9750 cpu_variant = *mcpu_cpu_opt;
9751
9752 /* Record the CPU type. */
9753 if (ilp32_p)
9754 mach = bfd_mach_aarch64_ilp32;
9755 else if (llp64_p)
9756 mach = bfd_mach_aarch64_llp64;
9757 else
9758 mach = bfd_mach_aarch64;
9759
9760 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9761 #ifdef OBJ_ELF
9762 /* FIXME - is there a better way to do it ? */
9763 aarch64_sframe_cfa_sp_reg = 31; /* sp. */
9764 aarch64_sframe_cfa_fp_reg = 29; /* x29. */
9765 aarch64_sframe_cfa_ra_reg = 30; /* x30 (LR). */
9766 #endif
9767 }
9768
9769 /* Command line processing. */
9770
9771 const char *md_shortopts = "m:";
9772
9773 #ifdef AARCH64_BI_ENDIAN
9774 #define OPTION_EB (OPTION_MD_BASE + 0)
9775 #define OPTION_EL (OPTION_MD_BASE + 1)
9776 #else
9777 #if TARGET_BYTES_BIG_ENDIAN
9778 #define OPTION_EB (OPTION_MD_BASE + 0)
9779 #else
9780 #define OPTION_EL (OPTION_MD_BASE + 1)
9781 #endif
9782 #endif
9783
9784 struct option md_longopts[] = {
9785 #ifdef OPTION_EB
9786 {"EB", no_argument, NULL, OPTION_EB},
9787 #endif
9788 #ifdef OPTION_EL
9789 {"EL", no_argument, NULL, OPTION_EL},
9790 #endif
9791 {NULL, no_argument, NULL, 0}
9792 };
9793
9794 size_t md_longopts_size = sizeof (md_longopts);
9795
9796 struct aarch64_option_table
9797 {
9798 const char *option; /* Option name to match. */
9799 const char *help; /* Help information. */
9800 int *var; /* Variable to change. */
9801 int value; /* What to change it to. */
9802 char *deprecated; /* If non-null, print this message. */
9803 };
9804
9805 static struct aarch64_option_table aarch64_opts[] = {
9806 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
9807 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
9808 NULL},
9809 #ifdef DEBUG_AARCH64
9810 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
9811 #endif /* DEBUG_AARCH64 */
9812 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
9813 NULL},
9814 {"mno-verbose-error", N_("do not output verbose error messages"),
9815 &verbose_error_p, 0, NULL},
9816 {NULL, NULL, NULL, 0, NULL}
9817 };
9818
9819 struct aarch64_cpu_option_table
9820 {
9821 const char *name;
9822 const aarch64_feature_set value;
9823 /* The canonical name of the CPU, or NULL to use NAME converted to upper
9824 case. */
9825 const char *canonical_name;
9826 };
9827
9828 /* This list should, at a minimum, contain all the cpu names
9829 recognized by GCC. */
9830 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
9831 {"all", AARCH64_ANY, NULL},
9832 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
9833 AARCH64_FEATURE_CRC), "Cortex-A34"},
9834 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
9835 AARCH64_FEATURE_CRC), "Cortex-A35"},
9836 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
9837 AARCH64_FEATURE_CRC), "Cortex-A53"},
9838 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
9839 AARCH64_FEATURE_CRC), "Cortex-A57"},
9840 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
9841 AARCH64_FEATURE_CRC), "Cortex-A72"},
9842 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
9843 AARCH64_FEATURE_CRC), "Cortex-A73"},
9844 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9845 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9846 "Cortex-A55"},
9847 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9848 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9849 "Cortex-A75"},
9850 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9851 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9852 "Cortex-A76"},
9853 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9854 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9855 | AARCH64_FEATURE_DOTPROD
9856 | AARCH64_FEATURE_SSBS),
9857 "Cortex-A76AE"},
9858 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9859 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9860 | AARCH64_FEATURE_DOTPROD
9861 | AARCH64_FEATURE_SSBS),
9862 "Cortex-A77"},
9863 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9864 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9865 | AARCH64_FEATURE_DOTPROD
9866 | AARCH64_FEATURE_SSBS),
9867 "Cortex-A65"},
9868 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9869 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9870 | AARCH64_FEATURE_DOTPROD
9871 | AARCH64_FEATURE_SSBS),
9872 "Cortex-A65AE"},
9873 {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9874 AARCH64_FEATURE_F16
9875 | AARCH64_FEATURE_RCPC
9876 | AARCH64_FEATURE_DOTPROD
9877 | AARCH64_FEATURE_SSBS
9878 | AARCH64_FEATURE_PROFILE),
9879 "Cortex-A78"},
9880 {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9881 AARCH64_FEATURE_F16
9882 | AARCH64_FEATURE_RCPC
9883 | AARCH64_FEATURE_DOTPROD
9884 | AARCH64_FEATURE_SSBS
9885 | AARCH64_FEATURE_PROFILE),
9886 "Cortex-A78AE"},
9887 {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9888 AARCH64_FEATURE_DOTPROD
9889 | AARCH64_FEATURE_F16
9890 | AARCH64_FEATURE_FLAGM
9891 | AARCH64_FEATURE_PAC
9892 | AARCH64_FEATURE_PROFILE
9893 | AARCH64_FEATURE_RCPC
9894 | AARCH64_FEATURE_SSBS),
9895 "Cortex-A78C"},
9896 {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
9897 AARCH64_FEATURE_BFLOAT16
9898 | AARCH64_FEATURE_I8MM
9899 | AARCH64_FEATURE_MEMTAG
9900 | AARCH64_FEATURE_SVE2_BITPERM),
9901 "Cortex-A510"},
9902 {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
9903 AARCH64_FEATURE_BFLOAT16
9904 | AARCH64_FEATURE_I8MM
9905 | AARCH64_FEATURE_MEMTAG
9906 | AARCH64_FEATURE_SVE2_BITPERM),
9907 "Cortex-A710"},
9908 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9909 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9910 | AARCH64_FEATURE_DOTPROD
9911 | AARCH64_FEATURE_PROFILE),
9912 "Ares"},
9913 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
9914 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9915 "Samsung Exynos M1"},
9916 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
9917 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9918 | AARCH64_FEATURE_RDMA),
9919 "Qualcomm Falkor"},
9920 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9921 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9922 | AARCH64_FEATURE_DOTPROD
9923 | AARCH64_FEATURE_SSBS),
9924 "Neoverse E1"},
9925 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9926 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9927 | AARCH64_FEATURE_DOTPROD
9928 | AARCH64_FEATURE_PROFILE),
9929 "Neoverse N1"},
9930 {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
9931 AARCH64_FEATURE_BFLOAT16
9932 | AARCH64_FEATURE_I8MM
9933 | AARCH64_FEATURE_F16
9934 | AARCH64_FEATURE_SVE
9935 | AARCH64_FEATURE_SVE2
9936 | AARCH64_FEATURE_SVE2_BITPERM
9937 | AARCH64_FEATURE_MEMTAG
9938 | AARCH64_FEATURE_RNG),
9939 "Neoverse N2"},
9940 {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9941 AARCH64_FEATURE_PROFILE
9942 | AARCH64_FEATURE_CVADP
9943 | AARCH64_FEATURE_SVE
9944 | AARCH64_FEATURE_SSBS
9945 | AARCH64_FEATURE_RNG
9946 | AARCH64_FEATURE_F16
9947 | AARCH64_FEATURE_BFLOAT16
9948 | AARCH64_FEATURE_I8MM), "Neoverse V1"},
9949 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9950 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9951 | AARCH64_FEATURE_RDMA),
9952 "Qualcomm QDF24XX"},
9953 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9954 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
9955 "Qualcomm Saphira"},
9956 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9957 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9958 "Cavium ThunderX"},
9959 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
9960 AARCH64_FEATURE_CRYPTO),
9961 "Broadcom Vulcan"},
9962 /* The 'xgene-1' spelling is an older name for 'xgene1'; it was used
9963 in earlier releases and is superseded by 'xgene1' in all
9964 tools. */
9965 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9966 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9967 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9968 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9969 {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9970 {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9971 AARCH64_FEATURE_F16
9972 | AARCH64_FEATURE_RCPC
9973 | AARCH64_FEATURE_DOTPROD
9974 | AARCH64_FEATURE_SSBS
9975 | AARCH64_FEATURE_PROFILE),
9976 "Cortex-X1"},
9977 {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
9978 AARCH64_FEATURE_BFLOAT16
9979 | AARCH64_FEATURE_I8MM
9980 | AARCH64_FEATURE_MEMTAG
9981 | AARCH64_FEATURE_SVE2_BITPERM),
9982 "Cortex-X2"},
9983 {"generic", AARCH64_ARCH_V8, NULL},
9984
9985 {NULL, AARCH64_ARCH_NONE, NULL}
9986 };
9987
9988 struct aarch64_arch_option_table
9989 {
9990 const char *name;
9991 const aarch64_feature_set value;
9992 };
9993
9994 /* This list should, at a minimum, contain all the architecture names
9995 recognized by GCC. */
9996 static const struct aarch64_arch_option_table aarch64_archs[] = {
9997 {"all", AARCH64_ANY},
9998 {"armv8-a", AARCH64_ARCH_V8},
9999 {"armv8.1-a", AARCH64_ARCH_V8_1},
10000 {"armv8.2-a", AARCH64_ARCH_V8_2},
10001 {"armv8.3-a", AARCH64_ARCH_V8_3},
10002 {"armv8.4-a", AARCH64_ARCH_V8_4},
10003 {"armv8.5-a", AARCH64_ARCH_V8_5},
10004 {"armv8.6-a", AARCH64_ARCH_V8_6},
10005 {"armv8.7-a", AARCH64_ARCH_V8_7},
10006 {"armv8.8-a", AARCH64_ARCH_V8_8},
10007 {"armv8-r", AARCH64_ARCH_V8_R},
10008 {"armv9-a", AARCH64_ARCH_V9},
10009 {"armv9.1-a", AARCH64_ARCH_V9_1},
10010 {"armv9.2-a", AARCH64_ARCH_V9_2},
10011 {"armv9.3-a", AARCH64_ARCH_V9_3},
10012 {NULL, AARCH64_ARCH_NONE}
10013 };
10014
10015 /* ISA extensions. */
10016 struct aarch64_option_cpu_value_table
10017 {
10018 const char *name;
10019 const aarch64_feature_set value;
10020 const aarch64_feature_set require; /* Feature dependencies. */
10021 };
10022
10023 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
10024 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
10025 AARCH64_ARCH_NONE},
10026 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
10027 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
10028 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
10029 AARCH64_ARCH_NONE},
10030 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
10031 AARCH64_ARCH_NONE},
10032 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
10033 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
10034 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
10035 AARCH64_ARCH_NONE},
10036 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
10037 AARCH64_ARCH_NONE},
10038 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
10039 AARCH64_ARCH_NONE},
10040 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
10041 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
10042 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
10043 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
10044 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
10045 AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
10046 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
10047 AARCH64_ARCH_NONE},
10048 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
10049 AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
10050 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
10051 AARCH64_ARCH_NONE},
10052 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
10053 AARCH64_FEATURE (AARCH64_FEATURE_F16
10054 | AARCH64_FEATURE_SIMD, 0)},
10055 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
10056 AARCH64_ARCH_NONE},
10057 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
10058 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
10059 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
10060 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
10061 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
10062 AARCH64_ARCH_NONE},
10063 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
10064 AARCH64_ARCH_NONE},
10065 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
10066 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
10067 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
10068 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
10069 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
10070 AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
10071 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
10072 AARCH64_ARCH_NONE},
10073 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
10074 AARCH64_ARCH_NONE},
10075 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
10076 AARCH64_ARCH_NONE},
10077 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
10078 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
10079 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
10080 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
10081 | AARCH64_FEATURE_SM4, 0)},
10082 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
10083 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
10084 | AARCH64_FEATURE_AES, 0)},
10085 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
10086 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
10087 | AARCH64_FEATURE_SHA3, 0)},
10088 {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
10089 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
10090 {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
10091 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
10092 | AARCH64_FEATURE_BFLOAT16, 0)},
10093 {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
10094 AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
10095 {"sme-f64f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
10096 AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
10097 {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
10098 AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
10099 {"sme-i16i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
10100 AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
10101 {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
10102 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
10103 {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
10104 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
10105 {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
10106 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
10107 {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
10108 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
10109 {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
10110 AARCH64_ARCH_NONE},
10111 {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
10112 AARCH64_ARCH_NONE},
10113 {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
10114 AARCH64_ARCH_NONE},
10115 {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
10116 AARCH64_ARCH_NONE},
10117 {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
10118 AARCH64_ARCH_NONE},
10119 {"cssc", AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
10120 AARCH64_ARCH_NONE},
10121 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
10122 };
10123
10124 struct aarch64_long_option_table
10125 {
10126 const char *option; /* Substring to match. */
10127 const char *help; /* Help information. */
10128 int (*func) (const char *subopt); /* Function to decode sub-option. */
10129 char *deprecated; /* If non-null, print this message. */
10130 };
10131
10132 /* Transitive closure of features depending on set. */
10133 static aarch64_feature_set
10134 aarch64_feature_disable_set (aarch64_feature_set set)
10135 {
10136 const struct aarch64_option_cpu_value_table *opt;
10137 aarch64_feature_set prev = 0;
10138
10139 while (prev != set) {
10140 prev = set;
10141 for (opt = aarch64_features; opt->name != NULL; opt++)
10142 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10143 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10144 }
10145 return set;
10146 }
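
/* For example, disabling "fp" also removes "simd" (which requires FP) and,
through SIMD, features such as "aes", "sm4" and "dotprod"; the loop above
iterates until no further dependent feature is found. */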
10147
10148 /* Transitive closure of dependencies of set. */
10149 static aarch64_feature_set
10150 aarch64_feature_enable_set (aarch64_feature_set set)
10151 {
10152 const struct aarch64_option_cpu_value_table *opt;
10153 aarch64_feature_set prev = 0;
10154
10155 while (prev != set) {
10156 prev = set;
10157 for (opt = aarch64_features; opt->name != NULL; opt++)
10158 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10159 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10160 }
10161 return set;
10162 }
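
/* For example, enabling "sve2" pulls in "sve", which requires "compnum",
which in turn requires "fp16" and "simd", both of which require "fp". */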
10163
10164 static int
10165 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10166 bool ext_only)
10167 {
10168 /* We insist on extensions being added before being removed. We achieve
10169 this by using the ADDING_VALUE variable to indicate whether we are
10170 adding an extension (1) or removing it (0), and by only allowing it to
10171 move forward through -1 -> 1 -> 0 (skipping a step is allowed, going back is not). */
10172 int adding_value = -1;
10173 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10174
10175 /* Copy the feature set, so that we can modify it. */
10176 *ext_set = **opt_p;
10177 *opt_p = ext_set;
10178
10179 while (str != NULL && *str != 0)
10180 {
10181 const struct aarch64_option_cpu_value_table *opt;
10182 const char *ext = NULL;
10183 int optlen;
10184
10185 if (!ext_only)
10186 {
10187 if (*str != '+')
10188 {
10189 as_bad (_("invalid architectural extension"));
10190 return 0;
10191 }
10192
10193 ext = strchr (++str, '+');
10194 }
10195
10196 if (ext != NULL)
10197 optlen = ext - str;
10198 else
10199 optlen = strlen (str);
10200
10201 if (optlen >= 2 && startswith (str, "no"))
10202 {
10203 if (adding_value != 0)
10204 adding_value = 0;
10205 optlen -= 2;
10206 str += 2;
10207 }
10208 else if (optlen > 0)
10209 {
10210 if (adding_value == -1)
10211 adding_value = 1;
10212 else if (adding_value != 1)
10213 {
10214 as_bad (_("must specify extensions to add before specifying "
10215 "those to remove"));
10216 return 0;
10217 }
10218 }
10219
10220 if (optlen == 0)
10221 {
10222 as_bad (_("missing architectural extension"));
10223 return 0;
10224 }
10225
10226 gas_assert (adding_value != -1);
10227
10228 for (opt = aarch64_features; opt->name != NULL; opt++)
10229 if (strncmp (opt->name, str, optlen) == 0)
10230 {
10231 aarch64_feature_set set;
10232
10233 /* Add or remove the extension. */
10234 if (adding_value)
10235 {
10236 set = aarch64_feature_enable_set (opt->value);
10237 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10238 }
10239 else
10240 {
10241 set = aarch64_feature_disable_set (opt->value);
10242 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10243 }
10244 break;
10245 }
10246
10247 if (opt->name == NULL)
10248 {
10249 as_bad (_("unknown architectural extension `%s'"), str);
10250 return 0;
10251 }
10252
10253 str = ext;
10254 }
10255
10256 return 1;
10257 }
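
/* For example, the extension string "+sve+nodotprod" first enables SVE
(together with its dependencies) and then removes DOTPROD, whereas
"+nodotprod+sve" is rejected because extensions to add must appear before
those to remove. */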
10258
10259 static int
10260 aarch64_parse_cpu (const char *str)
10261 {
10262 const struct aarch64_cpu_option_table *opt;
10263 const char *ext = strchr (str, '+');
10264 size_t optlen;
10265
10266 if (ext != NULL)
10267 optlen = ext - str;
10268 else
10269 optlen = strlen (str);
10270
10271 if (optlen == 0)
10272 {
10273 as_bad (_("missing cpu name `%s'"), str);
10274 return 0;
10275 }
10276
10277 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10278 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10279 {
10280 mcpu_cpu_opt = &opt->value;
10281 if (ext != NULL)
10282 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10283
10284 return 1;
10285 }
10286
10287 as_bad (_("unknown cpu `%s'"), str);
10288 return 0;
10289 }
10290
10291 static int
10292 aarch64_parse_arch (const char *str)
10293 {
10294 const struct aarch64_arch_option_table *opt;
10295 const char *ext = strchr (str, '+');
10296 size_t optlen;
10297
10298 if (ext != NULL)
10299 optlen = ext - str;
10300 else
10301 optlen = strlen (str);
10302
10303 if (optlen == 0)
10304 {
10305 as_bad (_("missing architecture name `%s'"), str);
10306 return 0;
10307 }
10308
10309 for (opt = aarch64_archs; opt->name != NULL; opt++)
10310 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10311 {
10312 march_cpu_opt = &opt->value;
10313 if (ext != NULL)
10314 return aarch64_parse_features (ext, &march_cpu_opt, false);
10315
10316 return 1;
10317 }
10318
10319 as_bad (_("unknown architecture `%s'"), str);
10320 return 0;
10321 }
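
/* For example, "-march=armv8.2-a+sve" selects the Armv8.2-A feature set and
then lets aarch64_parse_features fold in SVE and its dependencies;
"-mcpu=cortex-a57+crypto" is handled analogously by aarch64_parse_cpu
above. */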
10322
10323 /* ABIs. */
10324 struct aarch64_option_abi_value_table
10325 {
10326 const char *name;
10327 enum aarch64_abi_type value;
10328 };
10329
10330 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
10331 #ifdef OBJ_ELF
10332 {"ilp32", AARCH64_ABI_ILP32},
10333 {"lp64", AARCH64_ABI_LP64},
10334 #else
10335 {"llp64", AARCH64_ABI_LLP64},
10336 #endif
10337 };
10338
10339 static int
10340 aarch64_parse_abi (const char *str)
10341 {
10342 unsigned int i;
10343
10344 if (str[0] == '\0')
10345 {
10346 as_bad (_("missing abi name `%s'"), str);
10347 return 0;
10348 }
10349
10350 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10351 if (strcmp (str, aarch64_abis[i].name) == 0)
10352 {
10353 aarch64_abi = aarch64_abis[i].value;
10354 return 1;
10355 }
10356
10357 as_bad (_("unknown abi `%s'"), str);
10358 return 0;
10359 }
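
/* For example, "-mabi=ilp32" selects the 32-bit ELF ABI and "-mabi=lp64" the
64-bit one; on non-ELF (PE/COFF) targets only "-mabi=llp64" is recognized. */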
10360
10361 static struct aarch64_long_option_table aarch64_long_opts[] = {
10362 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
10363 aarch64_parse_abi, NULL},
10364 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
10365 aarch64_parse_cpu, NULL},
10366 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
10367 aarch64_parse_arch, NULL},
10368 {NULL, NULL, 0, NULL}
10369 };
10370
10371 int
10372 md_parse_option (int c, const char *arg)
10373 {
10374 struct aarch64_option_table *opt;
10375 struct aarch64_long_option_table *lopt;
10376
10377 switch (c)
10378 {
10379 #ifdef OPTION_EB
10380 case OPTION_EB:
10381 target_big_endian = 1;
10382 break;
10383 #endif
10384
10385 #ifdef OPTION_EL
10386 case OPTION_EL:
10387 target_big_endian = 0;
10388 break;
10389 #endif
10390
10391 case 'a':
10392 /* Listing option. Just ignore these; we don't support additional
10393 ones. */
10394 return 0;
10395
10396 default:
10397 for (opt = aarch64_opts; opt->option != NULL; opt++)
10398 {
10399 if (c == opt->option[0]
10400 && ((arg == NULL && opt->option[1] == 0)
10401 || streq (arg, opt->option + 1)))
10402 {
10403 /* If the option is deprecated, tell the user. */
10404 if (opt->deprecated != NULL)
10405 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10406 arg ? arg : "", _(opt->deprecated));
10407
10408 if (opt->var != NULL)
10409 *opt->var = opt->value;
10410
10411 return 1;
10412 }
10413 }
10414
10415 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10416 {
10417 /* These options are expected to have an argument. */
10418 if (c == lopt->option[0]
10419 && arg != NULL
10420 && startswith (arg, lopt->option + 1))
10421 {
10422 /* If the option is deprecated, tell the user. */
10423 if (lopt->deprecated != NULL)
10424 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10425 _(lopt->deprecated));
10426
10427 /* Call the sub-option parser. */
10428 return lopt->func (arg + strlen (lopt->option) - 1);
10429 }
10430 }
10431
10432 return 0;
10433 }
10434
10435 return 1;
10436 }
10437
10438 void
10439 md_show_usage (FILE * fp)
10440 {
10441 struct aarch64_option_table *opt;
10442 struct aarch64_long_option_table *lopt;
10443
10444 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10445
10446 for (opt = aarch64_opts; opt->option != NULL; opt++)
10447 if (opt->help != NULL)
10448 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10449
10450 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10451 if (lopt->help != NULL)
10452 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10453
10454 #ifdef OPTION_EB
10455 fprintf (fp, _("\
10456 -EB assemble code for a big-endian cpu\n"));
10457 #endif
10458
10459 #ifdef OPTION_EL
10460 fprintf (fp, _("\
10461 -EL assemble code for a little-endian cpu\n"));
10462 #endif
10463 }
10464
10465 /* Parse a .cpu directive. */
10466
10467 static void
10468 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10469 {
10470 const struct aarch64_cpu_option_table *opt;
10471 char saved_char;
10472 char *name;
10473 char *ext;
10474 size_t optlen;
10475
10476 name = input_line_pointer;
10477 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10478 saved_char = *input_line_pointer;
10479 *input_line_pointer = 0;
10480
10481 ext = strchr (name, '+');
10482
10483 if (ext != NULL)
10484 optlen = ext - name;
10485 else
10486 optlen = strlen (name);
10487
10488 /* Skip the first "all" entry. */
10489 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10490 if (strlen (opt->name) == optlen
10491 && strncmp (name, opt->name, optlen) == 0)
10492 {
10493 mcpu_cpu_opt = &opt->value;
10494 if (ext != NULL)
10495 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10496 return;
10497
10498 cpu_variant = *mcpu_cpu_opt;
10499
10500 *input_line_pointer = saved_char;
10501 demand_empty_rest_of_line ();
10502 return;
10503 }
10504 as_bad (_("unknown cpu `%s'"), name);
10505 *input_line_pointer = saved_char;
10506 ignore_rest_of_line ();
10507 }
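
/* For example, ".cpu cortex-a53+crypto" switches the assembler to the
Cortex-A53 feature set plus the crypto extension for the rest of the input
(or until the next .cpu/.arch directive). */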
10508
10509
10510 /* Parse a .arch directive. */
10511
10512 static void
10513 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10514 {
10515 const struct aarch64_arch_option_table *opt;
10516 char saved_char;
10517 char *name;
10518 char *ext;
10519 size_t optlen;
10520
10521 name = input_line_pointer;
10522 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10523 saved_char = *input_line_pointer;
10524 *input_line_pointer = 0;
10525
10526 ext = strchr (name, '+');
10527
10528 if (ext != NULL)
10529 optlen = ext - name;
10530 else
10531 optlen = strlen (name);
10532
10533 /* Skip the first "all" entry. */
10534 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10535 if (strlen (opt->name) == optlen
10536 && strncmp (name, opt->name, optlen) == 0)
10537 {
10538 mcpu_cpu_opt = &opt->value;
10539 if (ext != NULL)
10540 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10541 return;
10542
10543 cpu_variant = *mcpu_cpu_opt;
10544
10545 *input_line_pointer = saved_char;
10546 demand_empty_rest_of_line ();
10547 return;
10548 }
10549
10550 as_bad (_("unknown architecture `%s'"), name);
10551 *input_line_pointer = saved_char;
10552 ignore_rest_of_line ();
10553 }
10554
10555 /* Parse a .arch_extension directive. */
10556
10557 static void
10558 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10559 {
10560 char saved_char;
10561 char *ext = input_line_pointer;
10562
10563 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10564 saved_char = *input_line_pointer;
10565 *input_line_pointer = 0;
10566
10567 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10568 return;
10569
10570 cpu_variant = *mcpu_cpu_opt;
10571
10572 *input_line_pointer = saved_char;
10573 demand_empty_rest_of_line ();
10574 }
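
/* For example, ".arch_extension memtag" enables the Memory Tagging
instructions from this point on and ".arch_extension nomemtag" removes them
again; unlike .cpu and .arch, the directive takes a single extension name
without a leading '+'. */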
10575
10576 /* Copy symbol information. */
10577
10578 void
10579 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
10580 {
10581 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
10582 }
10583
10584 #ifdef OBJ_ELF
10585 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10586 This is needed so AArch64 specific st_other values can be independently
10587 specified for an IFUNC resolver (that is called by the dynamic linker)
10588 and the symbol it resolves (aliased to the resolver). In particular,
10589 if a function symbol has special st_other value set via directives,
10590 then attaching an IFUNC resolver to that symbol should not override
10591 the st_other setting. Requiring the directive on the IFUNC resolver
10592 symbol would be unexpected and problematic in C code, where the two
10593 symbols appear as two independent function declarations. */
10594
10595 void
10596 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10597 {
10598 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10599 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10600 /* If size is unset, copy size from src. Because we don't track whether
10601 .size has been used, we can't differentiate .size dest, 0 from the case
10602 where dest's size is unset. */
10603 if (!destelf->size && S_GET_SIZE (dest) == 0)
10604 {
10605 if (srcelf->size)
10606 {
10607 destelf->size = XNEW (expressionS);
10608 *destelf->size = *srcelf->size;
10609 }
10610 S_SET_SIZE (dest, S_GET_SIZE (src));
10611 }
10612 }
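
/* For example, a symbol whose st_other carries STO_AARCH64_VARIANT_PCS (set
via the .variant_pcs directive) keeps that marking when it is aliased to an
IFUNC resolver, precisely because st_other is deliberately not copied
here. */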
10613 #endif