aarch64: Tweak parsing of integer & FP registers
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
122 struct vector_type_el
123 {
124 enum vector_el_type type;
125 unsigned char defined;
126 unsigned element_size;
127 unsigned width;
128 int64_t index;
129 };
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
133 struct reloc
134 {
135 bfd_reloc_code_real_type type;
136 expressionS exp;
137 int pc_rel;
138 enum aarch64_opnd opnd;
139 uint32_t flags;
140 unsigned need_libopcodes_p : 1;
141 };
142
143 struct aarch64_instruction
144 {
145 /* libopcodes structure for instruction intermediate representation. */
146 aarch64_inst base;
147 /* Record assembly errors found during the parsing. */
148 aarch64_operand_error parsing_error;
149 /* The condition that appears in the assembly line. */
150 int cond;
151 /* Relocation information (including the GAS internal fixup). */
152 struct reloc reloc;
153 /* Need to generate an immediate in the literal pool. */
154 unsigned gen_lit_pool : 1;
155 };
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* Diagnostics inline function utilities.
165
166 These are lightweight utilities which should only be called by parse_operands
167 and other parsers. GAS processes each assembly line by parsing it against
168 instruction template(s), in the case of multiple templates (for the same
169 mnemonic name), those templates are tried one by one until one succeeds or
170 all fail. An assembly line may fail a few templates before being
171 successfully parsed; an error saved here in most cases is not a user error
172 but an error indicating the current template is not the right template.
173 Therefore it is very important that errors can be saved at a low cost during
174 the parsing; we don't want to slow down the whole parsing by recording
175 non-user errors in detail.
176
177 Remember that the objective is to help GAS pick up the most appropriate
178 error message in the case of multiple templates, e.g. FMOV which has 8
179 templates. */
180
181 static inline void
182 clear_error (void)
183 {
184 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
185 inst.parsing_error.kind = AARCH64_OPDE_NIL;
186 }
187
188 static inline bool
189 error_p (void)
190 {
191 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
192 }
193
194 static inline void
195 set_error (enum aarch64_operand_error_kind kind, const char *error)
196 {
197 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
198 inst.parsing_error.index = -1;
199 inst.parsing_error.kind = kind;
200 inst.parsing_error.error = error;
201 }
202
/* Record ERROR as a recoverable error (AARCH64_OPDE_RECOVERABLE):
   the operand is wrong for the current template but parsing of the
   remaining operands may usefully continue.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
208
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  Recording a NULL message string is the signal to
   later diagnostic code to fall back on that generic description.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
216
/* Record ERROR as a syntax error against the current instruction,
   unconditionally overwriting any earlier error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
222
223 static inline void
224 set_first_syntax_error (const char *error)
225 {
226 if (! error_p ())
227 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
228 }
229
/* Record ERROR as a fatal syntax error (AARCH64_OPDE_FATAL_SYNTAX_ERROR):
   no other instruction template should be tried after this.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
235 \f
236 /* Return value for certain parsers when the parsing fails; those parsers
237 return the information of the parsed result, e.g. register number, on
238 success. */
239 #define PARSE_FAIL -1
240
241 /* This is an invalid condition code that means no conditional field is
242 present. */
243 #define COND_ALWAYS 0x10
244
245 typedef struct
246 {
247 const char *template;
248 uint32_t value;
249 } asm_nzcv;
250
251 struct reloc_entry
252 {
253 char *name;
254 bfd_reloc_code_real_type reloc;
255 };
256
257 /* Macros to define the register types and masks for the purpose
258 of parsing. */
259
260 #undef AARCH64_REG_TYPES
261 #define AARCH64_REG_TYPES \
262 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
263 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
264 BASIC_REG_TYPE(SP_32) /* wsp */ \
265 BASIC_REG_TYPE(SP_64) /* sp */ \
266 BASIC_REG_TYPE(Z_32) /* wzr */ \
267 BASIC_REG_TYPE(Z_64) /* xzr */ \
268 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
269 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
270 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
271 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
272 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
273 BASIC_REG_TYPE(VN) /* v[0-31] */ \
274 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
275 BASIC_REG_TYPE(PN) /* p[0-15] */ \
276 BASIC_REG_TYPE(ZA) /* za */ \
277 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
278 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
279 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
280 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
281 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
282 /* Typecheck: same, plus SVE registers. */ \
283 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
286 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
288 /* Typecheck: same, plus SVE registers. */ \
289 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
293 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
295 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
299 /* Typecheck: any [BHSDQ]P FP. */ \
300 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
301 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
302 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
305 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
306 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
307 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
308 be used for SVE instructions, since Zn and Pn are valid symbols \
309 in other contexts. */ \
310 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
315 | REG_TYPE(ZN) | REG_TYPE(PN)) \
316 /* Any integer register; used for error messages only. */ \
317 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
320 /* The whole of ZA or a single tile. */ \
321 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
322 /* A horizontal or vertical slice of a ZA tile. */ \
323 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
324 /* Pseudo type to mark the end of the enumerator sequence. */ \
325 BASIC_REG_TYPE(MAX)
326
327 #undef BASIC_REG_TYPE
328 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
329 #undef MULTI_REG_TYPE
330 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
331
332 /* Register type enumerators. */
333 typedef enum aarch64_reg_type_
334 {
335 /* A list of REG_TYPE_*. */
336 AARCH64_REG_TYPES
337 } aarch64_reg_type;
338
339 #undef BASIC_REG_TYPE
340 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
341 #undef REG_TYPE
342 #define REG_TYPE(T) (1 << REG_TYPE_##T)
343 #undef MULTI_REG_TYPE
344 #define MULTI_REG_TYPE(T,V) V,
345
346 /* Structure for a hash table entry for a register. */
347 typedef struct
348 {
349 const char *name;
350 unsigned char number;
351 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
352 unsigned char builtin;
353 } reg_entry;
354
355 /* Values indexed by aarch64_reg_type to assist the type checking. */
356 static const unsigned reg_type_masks[] =
357 {
358 AARCH64_REG_TYPES
359 };
360
361 #undef BASIC_REG_TYPE
362 #undef REG_TYPE
363 #undef MULTI_REG_TYPE
364 #undef AARCH64_REG_TYPES
365
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  */
/* Map REG_TYPE to a human-readable "<kind> register expected" message,
   wrapped in N_() so it can be translated at the point of use.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* No message defined for this type: treat it as an internal
	 error rather than returning a misleading diagnostic.
	 as_fatal does not return.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
443
444 /* Some well known registers that we refer to directly elsewhere. */
445 #define REG_SP 31
446 #define REG_ZR 31
447
448 /* Instructions take 4 bytes in the object file. */
449 #define INSN_SIZE 4
450
451 static htab_t aarch64_ops_hsh;
452 static htab_t aarch64_cond_hsh;
453 static htab_t aarch64_shift_hsh;
454 static htab_t aarch64_sys_regs_hsh;
455 static htab_t aarch64_pstatefield_hsh;
456 static htab_t aarch64_sys_regs_ic_hsh;
457 static htab_t aarch64_sys_regs_dc_hsh;
458 static htab_t aarch64_sys_regs_at_hsh;
459 static htab_t aarch64_sys_regs_tlbi_hsh;
460 static htab_t aarch64_sys_regs_sr_hsh;
461 static htab_t aarch64_reg_hsh;
462 static htab_t aarch64_barrier_opt_hsh;
463 static htab_t aarch64_nzcv_hsh;
464 static htab_t aarch64_pldop_hsh;
465 static htab_t aarch64_hint_opt_hsh;
466
467 /* Stuff needed to resolve the label ambiguity
468 As:
469 ...
470 label: <insn>
471 may differ from:
472 ...
473 label:
474 <insn> */
475
476 static symbolS *last_label_seen;
477
478 /* Literal pool structure. Held on a per-section
479 and per-sub-section basis. */
480
481 #define MAX_LITERAL_POOL_SIZE 1024
482 typedef struct literal_expression
483 {
484 expressionS exp;
485 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
486 LITTLENUM_TYPE * bignum;
487 } literal_expression;
488
489 typedef struct literal_pool
490 {
491 literal_expression literals[MAX_LITERAL_POOL_SIZE];
492 unsigned int next_free_entry;
493 unsigned int id;
494 symbolS *symbol;
495 segT section;
496 subsegT sub_section;
497 int size;
498 struct literal_pool *next;
499 } literal_pool;
500
501 /* Pointer to a linked list of literal pools. */
502 static literal_pool *list_of_pools = NULL;
503 \f
504 /* Pure syntax. */
505
506 /* This array holds the chars that always start a comment. If the
507 pre-processor is disabled, these aren't very useful. */
508 const char comment_chars[] = "";
509
510 /* This array holds the chars that only start a comment at the beginning of
511 a line. If the line seems to have the form '# 123 filename'
512 .line and .file directives will appear in the pre-processed output. */
513 /* Note that input_file.c hand checks for '#' at the beginning of the
514 first line of the input file. This is because the compiler outputs
515 #NO_APP at the beginning of its output. */
516 /* Also note that comments like this one will always work. */
517 const char line_comment_chars[] = "#";
518
519 const char line_separator_chars[] = ";";
520
521 /* Chars that can be used to separate mant
522 from exp in floating point numbers. */
523 const char EXP_CHARS[] = "eE";
524
525 /* Chars that mean this number is a floating point constant. */
526 /* As in 0f12.456 */
527 /* or 0d1.2345e12 */
528
529 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
530
531 /* Prefix character that indicates the start of an immediate value. */
532 #define is_immediate_prefix(C) ((C) == '#')
533
534 /* Separator character handling. */
535
536 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
537
/* If **STR is the character C, advance *STR past it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */

static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
549
550 #define skip_past_comma(str) skip_past_char (str, ',')
551
552 /* Arithmetic expressions (possibly involving symbols). */
553
554 static bool in_aarch64_get_expression = false;
555
556 /* Third argument to aarch64_get_expression. */
557 #define GE_NO_PREFIX false
558 #define GE_OPT_PREFIX true
559
560 /* Fourth argument to aarch64_get_expression. */
561 #define ALLOW_ABSENT false
562 #define REJECT_ABSENT true
563
564 /* Return TRUE if the string pointed by *STR is successfully parsed
565 as an valid expression; *EP will be filled with the information of
566 such an expression. Otherwise return FALSE.
567
568 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */
570
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#' immediate marker.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     redirect it to *STR for the duration of the call and restore it
     on every exit path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand to mark unparsable operands O_illegal while we
     are inside this call.  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	/* An explicit '#' promised an immediate, so this failure is
	   fatal rather than a mere template mismatch.  */
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  /* For a.out targets, only expressions in these well-known segments
     are acceptable.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
630
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  Delegates entirely to the generic IEEE
   helper, honouring the target's endianness.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
641
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only intervene for expressions parsed on behalf of
     aarch64_get_expression; marking them O_illegal lets that caller
     attribute the failure to the whole instruction.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
650
651 /* Immediate values. */
652
653 /* Errors may be set multiple times during parsing or bit encoding
654 (particularly in the Neon bits), but usually the earliest error which is set
655 will be the most meaningful. Avoid overwriting it with later (cascading)
656 errors by calling this function. */
657
/* Record ERROR as a syntax error unless an earlier (and usually more
   meaningful) error has already been recorded.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
664
/* Similar to first_error, but this function accepts formatted error
   message; the formatted text is kept in a static buffer until it is
   collected by as_bad at the end of the assembly line.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* All callers pass short messages; the result must fit.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
689
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording a first_error) if the combination is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type (NT_b..NT_q).  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest vector qualifier for each element type; wider shapes are
     found at a fixed offset from these bases.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predication suffixes map directly to /Z and /M qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: relies on AARCH64_OPND_QLF_S_[BHSDQ]
	 following the same order as NT_[bhsdq].  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
764
765 /* Register parsing. */
766
/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  p = start;
  /* A register name must start with a letter that is also a valid
     symbol-name starter.  */
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan the candidate name: letters, digits and underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up among the known (and .req-defined) registers.  */
  reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
803
804 /* Return the operand qualifier associated with all uses of REG, or
805 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
806 that qualifiers don't apply to REG or that qualifiers are added
807 using suffixes. */
808
809 static aarch64_opnd_qualifier_t
810 inherent_reg_qualifier (const reg_entry *reg)
811 {
812 switch (reg->type)
813 {
814 case REG_TYPE_R_32:
815 case REG_TYPE_SP_32:
816 case REG_TYPE_Z_32:
817 return AARCH64_OPND_QLF_W;
818
819 case REG_TYPE_R_64:
820 case REG_TYPE_SP_64:
821 case REG_TYPE_Z_64:
822 return AARCH64_OPND_QLF_X;
823
824 case REG_TYPE_FP_B:
825 case REG_TYPE_FP_H:
826 case REG_TYPE_FP_S:
827 case REG_TYPE_FP_D:
828 case REG_TYPE_FP_Q:
829 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
830
831 default:
832 return AARCH64_OPND_QLF_NIL;
833 }
834 }
835
836 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
837 return FALSE. */
838 static bool
839 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
840 {
841 return (reg_type_masks[type] & (1 << reg->type)) != 0;
842 }
843
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_ZN:
      /* An SVE Z register is only acceptable when REG_TYPE allows it,
	 and it must carry an explicit ".s" or ".d" suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      /* Anything else must be a GPR, [WX]ZR or [W]SP; its qualifier
	 follows directly from the register type.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  *ccp = str;

  return reg;
}
892
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Same as aarch64_addr_reg_parse but with SVE registers excluded.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
904
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only Advanced SIMD (V) registers take a leading element count;
     for all other register types, width 0 means "variable width".  */
  if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* ".q" is rejected for V registers unless the count is 1
	 (i.e. the scalar-like "1q" shape).  */
      if (reg_type != REG_TYPE_VN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A non-zero count must describe a full 64-bit or 128-bit vector,
     or one of the special short shapes 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
991
/* *STR contains an SVE zero/merge predication suffix.  Parse it into
   *PARSED_TYPE and point *STR at the end of the suffix.  Return TRUE
   on success; record a first_error and return FALSE otherwise.  */

static bool
parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;

  /* Skip '/'.  */
  gas_assert (*ptr == '/');
  ptr++;
  switch (TOLOWER (*ptr))
    {
    case 'z':
      parsed_type->type = NT_zero;
      break;
    case 'm':
      parsed_type->type = NT_merge;
      break;
    default:
      if (*ptr != '\0' && *ptr != ',')
	first_error_fmt (_("unexpected character `%c' in predication type"),
			 *ptr);
      else
	first_error (_("missing predication type"));
      return false;
    }
  /* Predication suffixes never carry a width of their own.  */
  parsed_type->width = 0;
  *str = ptr + 1;
  return true;
}
1023
1024 /* Return true if CH is a valid suffix character for registers of
1025 type TYPE. */
1026
1027 static bool
1028 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1029 {
1030 switch (type)
1031 {
1032 case REG_TYPE_VN:
1033 case REG_TYPE_ZN:
1034 case REG_TYPE_ZA:
1035 case REG_TYPE_ZAT:
1036 case REG_TYPE_ZATH:
1037 case REG_TYPE_ZATV:
1038 return ch == '.';
1039
1040 case REG_TYPE_PN:
1041 return ch == '.' || ch == '/';
1042
1043 default:
1044 return false;
1045 }
1046 }
1047
1048 /* Parse an index expression at *STR, storing it in *IMM on success. */
1049
1050 static bool
1051 parse_index_expression (char **str, int64_t *imm)
1052 {
1053 expressionS exp;
1054
1055 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1056 if (exp.X_op != O_constant)
1057 {
1058 first_error (_("constant expression required"));
1059 return false;
1060 }
1061 *imm = exp.X_add_number;
1062 return true;
1063 }
1064
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from an empty qualifier description; this is what the caller
     sees in *TYPEINFO for a plain (unqualified, unindexed) register.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return NULL;
    }
  /* From here on use the register's own type, which may be more
     specific than the TYPE class the caller asked for.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* ZA tiles: the tile number must fit the element size; e.g.
	     za0..za1 for .h (16-bit), za0..za3 for .s (32-bit).  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* Suffix is '/', which aarch64_valid_suffix_char_p only
	     allows for predicate registers.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_VN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  /* No index was parsed: complain if one was required (see the
     NTA_HASINDEX case above), unless we are inside a register list,
     where the index follows the closing '}'.  */
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  /* Commit the consumed input only on success.  */
  *ccp = str;

  return reg;
}
1200
1201 /* Parse register.
1202
1203 Return the register on success; return null otherwise.
1204
1205 If this is a NEON vector register with additional type information, fill
1206 in the struct pointed to by VECTYPE (if non-NULL).
1207
1208 This parser does not handle register lists. */
1209
1210 static const reg_entry *
1211 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1212 struct vector_type_el *vectype)
1213 {
1214 return parse_typed_reg (ccp, type, vectype, 0);
1215 }
1216
1217 static inline bool
1218 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1219 {
1220 return (e1.type == e2.type
1221 && e1.defined == e2.defined
1222 && e1.width == e2.width
1223 && e1.element_size == e2.element_size
1224 && e1.index == e2.index);
1225 }
1226
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Each iteration parses one register.  IN_RANGE is set (by the loop
     condition below) when the previous separator was '-', i.e. the
     register about to be parsed is the upper bound of a range.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* lower bound of the range */
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      PTR_IN_REGLIST);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* A per-element index ("{...}[i]" form) is parsed after the
	 closing brace; remember that one is required.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All registers in the list must share the first register's
	     shape and index information.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Accumulate every register of the (possibly one-element) range
	 VAL_RANGE..VAL, 5 bits per register.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ',' or, failing that, on '-' (setting IN_RANGE as a
     side effect of the comma operator).  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the shared element index following the list, if required.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  /* The consumed input is committed even on error so that the caller's
     diagnostics point past the list.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1381
1382 /* Directives: register aliases. */
1383
1384 static reg_entry *
1385 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1386 {
1387 reg_entry *new;
1388 const char *name;
1389
1390 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1391 {
1392 if (new->builtin)
1393 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1394 str);
1395
1396 /* Only warn about a redefinition if it's not defined as the
1397 same register. */
1398 else if (new->number != number || new->type != type)
1399 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1400
1401 return NULL;
1402 }
1403
1404 name = xstrdup (str);
1405 new = XNEW (reg_entry);
1406
1407 new->name = name;
1408 new->number = number;
1409 new->type = type;
1410 new->builtin = false;
1411
1412 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1413
1414 return new;
1415 }
1416
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return true: this was recognizably a .req, so the caller
	 should not try to process the line any further.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case-folded variants when they differ from the
	 name as written (strncmp != 0).  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name, so the scratch buffer can go.  */
  free (nbuf);
  return true;
}
1496
1497 /* Should never be called, as .req goes between the alias and the
1498 register name, not at the beginning of the line. */
1499 static void
1500 s_req (int a ATTRIBUTE_UNUSED)
1501 {
1502 as_bad (_("invalid syntax for .req directive"));
1503 }
1504
1505 /* The .unreq directive deletes an alias which was previously defined
1506 by .req. For example:
1507
1508 my_alias .req r11
1509 .unreq my_alias */
1510
1511 static void
1512 s_unreq (int a ATTRIBUTE_UNUSED)
1513 {
1514 char *name;
1515 char saved_char;
1516
1517 name = input_line_pointer;
1518 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1519 saved_char = *input_line_pointer;
1520 *input_line_pointer = 0;
1521
1522 if (!*name)
1523 as_bad (_("invalid syntax for .unreq directive"));
1524 else
1525 {
1526 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1527
1528 if (!reg)
1529 as_bad (_("unknown register alias '%s'"), name);
1530 else if (reg->builtin)
1531 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1532 name);
1533 else
1534 {
1535 char *p;
1536 char *nbuf;
1537
1538 str_hash_delete (aarch64_reg_hsh, name);
1539 free ((char *) reg->name);
1540 free (reg);
1541
1542 /* Also locate the all upper case and all lower case versions.
1543 Do not complain if we cannot find one or the other as it
1544 was probably deleted above. */
1545
1546 nbuf = strdup (name);
1547 for (p = nbuf; *p; p++)
1548 *p = TOUPPER (*p);
1549 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1550 if (reg)
1551 {
1552 str_hash_delete (aarch64_reg_hsh, nbuf);
1553 free ((char *) reg->name);
1554 free (reg);
1555 }
1556
1557 for (p = nbuf; *p; p++)
1558 *p = TOLOWER (*p);
1559 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1560 if (reg)
1561 {
1562 str_hash_delete (aarch64_reg_hsh, nbuf);
1563 free ((char *) reg->name);
1564 free (reg);
1565 }
1566
1567 free (nbuf);
1568 }
1569 }
1570
1571 *input_line_pointer = saved_char;
1572 demand_empty_rest_of_line ();
1573 }
1574
1575 /* Directives: Instruction set selection. */
1576
1577 #if defined OBJ_ELF || defined OBJ_COFF
1578 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1579 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
1582
/* Create a new mapping symbol ($d for data, $x for code) for the
   transition to STATE, at offset VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Drop the superseded symbol from the symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol: the new one
	 supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1638
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit $d at offset VALUE in FRAG and a STATE mapping symbol BYTES
   later, replacing any mapping symbol already recorded at VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed is also the first in the frag;
	     clear that record too.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then restore STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1666
1667 static void mapping_state_2 (enum mstate state, int max_chars);
1668
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* The bytes already emitted before this first instruction were
	 data; retroactively mark them with $d at the section start.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the STATE mapping symbol at the current position.  */
  mapping_state_2 (state, 0);
}
1710
1711 /* Same as mapping_state, but MAX_CHARS bytes have already been
1712 allocated. Put the mapping symbol that far back. */
1713
1714 static void
1715 mapping_state_2 (enum mstate state, int max_chars)
1716 {
1717 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1718
1719 if (!SEG_NORMAL (now_seg))
1720 return;
1721
1722 if (mapstate == state)
1723 /* The mapping symbol has already been emitted.
1724 There is nothing else to do. */
1725 return;
1726
1727 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1728 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1729 }
1730 #else
1731 #define mapping_state(x) /* nothing */
1732 #define mapping_state_2(x, y) /* nothing */
1733 #endif
1734
1735 /* Directives: sectioning and alignment. */
1736
1737 static void
1738 s_bss (int ignore ATTRIBUTE_UNUSED)
1739 {
1740 /* We don't support putting frags in the BSS segment, we fake it by
1741 marking in_bss, then looking at s_skip for clues. */
1742 subseg_set (bss_section, 0);
1743 demand_empty_rest_of_line ();
1744 mapping_state (MAP_DATA);
1745 }
1746
1747 static void
1748 s_even (int ignore ATTRIBUTE_UNUSED)
1749 {
1750 /* Never make frag if expect extra pass. */
1751 if (!need_pass_2)
1752 frag_align (1, 0, 0);
1753
1754 record_alignment (now_seg, 1);
1755
1756 demand_empty_rest_of_line ();
1757 }
1758
1759 /* Directives: Literal pools. */
1760
1761 static literal_pool *
1762 find_literal_pool (int size)
1763 {
1764 literal_pool *pool;
1765
1766 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1767 {
1768 if (pool->section == now_seg
1769 && pool->sub_section == now_subseg && pool->size == size)
1770 break;
1771 }
1772
1773 return pool;
1774 }
1775
1776 static literal_pool *
1777 find_or_make_literal_pool (int size)
1778 {
1779 /* Next literal pool ID number. */
1780 static unsigned int latest_pool_num = 1;
1781 literal_pool *pool;
1782
1783 pool = find_literal_pool (size);
1784
1785 if (pool == NULL)
1786 {
1787 /* Create a new pool. */
1788 pool = XNEW (literal_pool);
1789 if (!pool)
1790 return NULL;
1791
1792 /* Currently we always put the literal pool in the current text
1793 section. If we were generating "small" model code where we
1794 knew that all code and initialised data was within 1MB then
1795 we could output literals to mergeable, read-only data
1796 sections. */
1797
1798 pool->next_free_entry = 0;
1799 pool->section = now_seg;
1800 pool->sub_section = now_subseg;
1801 pool->size = size;
1802 pool->next = list_of_pools;
1803 pool->symbol = NULL;
1804
1805 /* Add it to the list. */
1806 list_of_pools = pool;
1807 }
1808
1809 /* New pools, and emptied pools, will have a NULL symbol. */
1810 if (pool->symbol == NULL)
1811 {
1812 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1813 &zero_address_frag, 0);
1814 pool->id = latest_pool_num++;
1815 }
1816
1817 /* Done. */
1818 return pool;
1819 }
1820
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten to a symbol reference into the pool.  */

static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constant literals match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic literals match on symbol(s) plus addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to refer to the pool entry: the pool's label symbol
     plus the entry's byte offset within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1880
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the pre-created SYMBOLP its NAME, SEGMENT, value VALU and FRAG,
   and link it onto the end of the symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy the name into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1931
1932
/* Handle the ".ltorg"/".pool" directive: dump every non-empty literal
   pool (4-byte and 8-byte) at the current position, then mark the
   pools as empty so a new pool can start afterwards.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align == 2 gives the 4-byte (word) pool, align == 3 the 8-byte
     (xword) pool; 4 is apparently unused but harmless.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Note: the name deliberately contains a \002 (STX) byte so it
	 cannot clash with any user symbol.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* The pool's placeholder symbol finally gets its real location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1991
1992 #if defined(OBJ_ELF) || defined(OBJ_COFF)
1993 /* Forward declarations for functions below, in the MD interface
1994 section. */
1995 static struct reloc_table_entry * find_reloc_table_entry (char **);
1996
1997 /* Directives: Data. */
1998 /* N.B. the support for relocation suffix in this directive needs to be
1999 implemented properly. */
2000
/* Handle ".word"/".long" (NBYTES == 4) and ".xword"/".dword"
   (NBYTES == 8): emit a comma-separated list of NBYTES-wide data
   values, diagnosing (but not yet supporting) relocation suffixes.  */

static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbol may be followed by "#:reloc_op:" or ":reloc_op:";
	     recognize the suffix but reject it for now.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2052 #endif
2053
2054 #ifdef OBJ_ELF
2055 /* Forward declarations for functions below, in the MD interface
2056 section. */
2057 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2058
2059 /* Mark symbol that it follows a variant PCS convention. */
2060
2061 static void
2062 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2063 {
2064 char *name;
2065 char c;
2066 symbolS *sym;
2067 asymbol *bfdsym;
2068 elf_symbol_type *elfsym;
2069
2070 c = get_symbol_name (&name);
2071 if (!*name)
2072 as_bad (_("Missing symbol name in directive"));
2073 sym = symbol_find_or_make (name);
2074 restore_line_pointer (c);
2075 demand_empty_rest_of_line ();
2076 bfdsym = symbol_get_bfdsym (sym);
2077 elfsym = elf_symbol_from (bfdsym);
2078 gas_assert (elfsym);
2079 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2080 }
2081 #endif /* OBJ_ELF */
2082
/* Output a 32-bit word, but mark as an instruction.  Handles the
   ".inst" directive: a comma-separated list of constant expressions,
   each emitted as one 4-byte instruction word (byte-swapped on
   big-endian targets, since instructions are always little-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction encodings are little-endian regardless of data
	 endianness; swap so emit_expr produces the right bytes.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted instructions for line-number information.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2139
2140 static void
2141 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2142 {
2143 demand_empty_rest_of_line ();
2144 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2145 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2146 }
2147
2148 #ifdef OBJ_ELF
2149 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2150
2151 static void
2152 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2153 {
2154 expressionS exp;
2155
2156 expression (&exp);
2157 frag_grow (4);
2158 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2159 BFD_RELOC_AARCH64_TLSDESC_ADD);
2160
2161 demand_empty_rest_of_line ();
2162 }
2163
2164 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2165
2166 static void
2167 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2168 {
2169 expressionS exp;
2170
2171 /* Since we're just labelling the code, there's no need to define a
2172 mapping symbol. */
2173 expression (&exp);
2174 /* Make sure there is enough room in this frag for the following
2175 blr. This trick only works if the blr follows immediately after
2176 the .tlsdesc directive. */
2177 frag_grow (4);
2178 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2179 BFD_RELOC_AARCH64_TLSDESC_CALL);
2180
2181 demand_empty_rest_of_line ();
2182 }
2183
2184 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2185
2186 static void
2187 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2188 {
2189 expressionS exp;
2190
2191 expression (&exp);
2192 frag_grow (4);
2193 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2194 BFD_RELOC_AARCH64_TLSDESC_LDR);
2195
2196 demand_empty_rest_of_line ();
2197 }
2198 #endif /* OBJ_ELF */
2199
2200 #ifdef TE_PE
2201 static void
2202 s_secrel (int dummy ATTRIBUTE_UNUSED)
2203 {
2204 expressionS exp;
2205
2206 do
2207 {
2208 expression (&exp);
2209 if (exp.X_op == O_symbol)
2210 exp.X_op = O_secrel;
2211
2212 emit_expr (&exp, 4);
2213 }
2214 while (*input_line_pointer++ == ',');
2215
2216 input_line_pointer--;
2217 demand_empty_rest_of_line ();
2218 }
2219
2220 void
2221 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2222 {
2223 expressionS exp;
2224
2225 exp.X_op = O_secrel;
2226 exp.X_add_symbol = symbol;
2227 exp.X_add_number = 0;
2228 emit_expr (&exp, size);
2229 }
2230
2231 static void
2232 s_secidx (int dummy ATTRIBUTE_UNUSED)
2233 {
2234 expressionS exp;
2235
2236 do
2237 {
2238 expression (&exp);
2239 if (exp.X_op == O_symbol)
2240 exp.X_op = O_secidx;
2241
2242 emit_expr (&exp, 2);
2243 }
2244 while (*input_line_pointer++ == ',');
2245
2246 input_line_pointer--;
2247 demand_empty_rest_of_line ();
2248 }
2249 #endif /* TE_PE */
2250
2251 static void s_aarch64_arch (int);
2252 static void s_aarch64_cpu (int);
2253 static void s_aarch64_arch_extension (int);
2254
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is a synonym for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* The argument is the data size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* The argument selects the float format passed to float_cons
     ('h' for .float16, 'b' for .bfloat16).  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2294 \f
2295
2296 /* Check whether STR points to a register name followed by a comma or the
2297 end of line; REG_TYPE indicates which register types are checked
2298 against. Return TRUE if STR is such a register name; otherwise return
2299 FALSE. The function does not intend to produce any diagnostics, but since
2300 the register parser aarch64_reg_parse, which is called by this function,
2301 does produce diagnostics, we call clear_error to clear any diagnostics
2302 that may be generated by aarch64_reg_parse.
2303 Also, the function returns FALSE directly if there is any user error
2304 present at the function entry. This prevents the existing diagnostics
2305 state from being spoiled.
2306 The function currently serves parse_constant_immediate and
2307 parse_big_immediate only. */
2308 static bool
2309 reg_name_p (char *str, aarch64_reg_type reg_type)
2310 {
2311 const reg_entry *reg;
2312
2313 /* Prevent the diagnostics state from being spoiled. */
2314 if (error_p ())
2315 return false;
2316
2317 reg = aarch64_reg_parse (&str, reg_type, NULL);
2318
2319 /* Clear the parsing error that may be set by the reg parser. */
2320 clear_error ();
2321
2322 if (!reg)
2323 return false;
2324
2325 skip_whitespace (str);
2326 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2327 return true;
2328
2329 return false;
2330 }
2331
2332 /* Parser functions used exclusively in instruction operands. */
2333
2334 /* Parse an immediate expression which may not be constant.
2335
2336 To prevent the expression parser from pushing a register name
2337 into the symbol table as an undefined symbol, firstly a check is
2338 done to find out whether STR is a register of type REG_TYPE followed
2339 by a comma or the end of line. Return FALSE if STR is such a string. */
2340
2341 static bool
2342 parse_immediate_expression (char **str, expressionS *exp,
2343 aarch64_reg_type reg_type)
2344 {
2345 if (reg_name_p (*str, reg_type))
2346 {
2347 set_recoverable_error (_("immediate operand required"));
2348 return false;
2349 }
2350
2351 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2352
2353 if (exp->X_op == O_absent)
2354 {
2355 set_fatal_syntax_error (_("missing immediate expression"));
2356 return false;
2357 }
2358
2359 return true;
2360 }
2361
2362 /* Constant immediate-value read function for use in insn parsing.
2363 STR points to the beginning of the immediate (with the optional
2364 leading #); *VAL receives the value. REG_TYPE says which register
2365 names should be treated as registers rather than as symbolic immediates.
2366
2367 Return TRUE on success; otherwise return FALSE. */
2368
2369 static bool
2370 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2371 {
2372 expressionS exp;
2373
2374 if (! parse_immediate_expression (str, &exp, reg_type))
2375 return false;
2376
2377 if (exp.X_op != O_constant)
2378 {
2379 set_syntax_error (_("constant expression required"));
2380 return false;
2381 }
2382
2383 *val = exp.X_add_number;
2384 return true;
2385 }
2386
/* Compress the IEEE single-precision encoding IMM into the 8-bit
   immediate field: bits b[25:19] move down to b[6:0] and the sign
   bit b[31] moves to b[7].  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  return sign | low;
}
2393
2394 /* Return TRUE if the single-precision floating-point value encoded in IMM
2395 can be expressed in the AArch64 8-bit signed floating-point format with
2396 3-bit exponent and normalized 4 bits of precision; in other words, the
2397 floating-point value must be expressable as
2398 (+/-) n / 16 * power (2, r)
2399 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2400
2401 static bool
2402 aarch64_imm_float_p (uint32_t imm)
2403 {
2404 /* If a single-precision floating-point value has the following bit
2405 pattern, it can be expressed in the AArch64 8-bit floating-point
2406 format:
2407
2408 3 32222222 2221111111111
2409 1 09876543 21098765432109876543210
2410 n Eeeeeexx xxxx0000000000000000000
2411
2412 where n, e and each x are either 0 or 1 independently, with
2413 E == ~ e. */
2414
2415 uint32_t pattern;
2416
2417 /* Prepare the pattern for 'Eeeeee'. */
2418 if (((imm >> 30) & 0x1) == 0)
2419 pattern = 0x3e000000;
2420 else
2421 pattern = 0x40000000;
2422
2423 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2424 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2425 }
2426
2427 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2428 as an IEEE float without any loss of precision. Store the value in
2429 *FPWORD if so. */
2430
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double-precision value converts exactly when it has the form

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     ----------------------------->  nEeeeeee esssssss ssssssss sssssSSS
	  if Eeee_eeee != 1111_1111

     where n, e, s and S are 0 or 1 independently and ~ is the inverse
     of E.  */

  uint32_t high32 = imm >> 32;

  /* The 29 low-order fraction bits must all be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three exponent bits below the top one (~~~) must be the
     complement of the top bit (E).  */
  uint32_t expected = (high32 & 0x40000000) ? 0x40000000 : 0x38000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Reject Eeee_eeee == 1111_1111, which has no float counterpart.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | ((uint32_t) imm >> 29));		/* 3 S bits.  */
  return true;
}
2474
2475 /* Return true if we should treat OPERAND as a double-precision
2476 floating-point operand rather than a single-precision one. */
2477 static bool
2478 double_precision_operand_p (const aarch64_opnd_info *operand)
2479 {
2480 /* Check for unsuffixed SVE registers, which are allowed
2481 for LDR and STR but not in instructions that require an
2482 immediate. We get better error messages if we arbitrarily
2483 pick one size, parse the immediate normally, and then
2484 report the match failure in the normal way. */
2485 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2486 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2487 }
2488
2489 /* Parse a floating-point immediate. Return TRUE on success and return the
2490 value in *IMMED in the format of IEEE754 single-precision encoding.
2491 *CCP points to the start of the string; DP_P is TRUE when the immediate
2492 is expected to be in double-precision (N.B. this only matters when
2493 hexadecimal representation is involved). REG_TYPE says which register
2494 names should be treated as registers rather than as symbolic immediates.
2495
2496 This routine accepts any IEEE float; it is up to the callers to reject
2497 invalid ones. */
2498
2499 static bool
2500 parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
2501 aarch64_reg_type reg_type)
2502 {
2503 char *str = *ccp;
2504 char *fpnum;
2505 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2506 int64_t val = 0;
2507 unsigned fpword = 0;
2508 bool hex_p = false;
2509
2510 skip_past_char (&str, '#');
2511
2512 fpnum = str;
2513 skip_whitespace (fpnum);
2514
2515 if (startswith (fpnum, "0x"))
2516 {
2517 /* Support the hexadecimal representation of the IEEE754 encoding.
2518 Double-precision is expected when DP_P is TRUE, otherwise the
2519 representation should be in single-precision. */
2520 if (! parse_constant_immediate (&str, &val, reg_type))
2521 goto invalid_fp;
2522
2523 if (dp_p)
2524 {
2525 if (!can_convert_double_to_float (val, &fpword))
2526 goto invalid_fp;
2527 }
2528 else if ((uint64_t) val > 0xffffffff)
2529 goto invalid_fp;
2530 else
2531 fpword = val;
2532
2533 hex_p = true;
2534 }
2535 else if (reg_name_p (str, reg_type))
2536 {
2537 set_recoverable_error (_("immediate operand required"));
2538 return false;
2539 }
2540
2541 if (! hex_p)
2542 {
2543 int i;
2544
2545 if ((str = atof_ieee (str, 's', words)) == NULL)
2546 goto invalid_fp;
2547
2548 /* Our FP word must be 32 bits (single-precision FP). */
2549 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2550 {
2551 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2552 fpword |= words[i];
2553 }
2554 }
2555
2556 *immed = fpword;
2557 *ccp = str;
2558 return true;
2559
2560 invalid_fp:
2561 set_fatal_syntax_error (_("invalid floating-point constant"));
2562 return false;
2563 }
2564
2565 /* Less-generic immediate-value read function with the possibility of loading
2566 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2567 instructions.
2568
2569 To prevent the expression parser from pushing a register name into the
2570 symbol table as an undefined symbol, a check is firstly done to find
2571 out whether STR is a register of type REG_TYPE followed by a comma or
2572 the end of line. Return FALSE if STR is such a register. */
2573
2574 static bool
2575 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2576 {
2577 char *ptr = *str;
2578
2579 if (reg_name_p (ptr, reg_type))
2580 {
2581 set_syntax_error (_("immediate operand required"));
2582 return false;
2583 }
2584
2585 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2586
2587 if (inst.reloc.exp.X_op == O_constant)
2588 *imm = inst.reloc.exp.X_add_number;
2589
2590 *str = ptr;
2591
2592 return true;
2593 }
2594
2595 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2596 if NEED_LIBOPCODES is non-zero, the fixup will need
2597 assistance from the libopcodes. */
2598
2599 static inline void
2600 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2601 const aarch64_opnd_info *operand,
2602 int need_libopcodes_p)
2603 {
2604 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2605 reloc->opnd = operand->type;
2606 if (need_libopcodes_p)
2607 reloc->need_libopcodes_p = 1;
2608 };
2609
2610 /* Return TRUE if the instruction needs to be fixed up later internally by
2611 the GAS; otherwise return FALSE. */
2612
2613 static inline bool
2614 aarch64_gas_internal_fixup_p (void)
2615 {
2616 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2617 }
2618
2619 /* Assign the immediate value to the relevant field in *OPERAND if
2620 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2621 needs an internal fixup in a later stage.
2622 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2623 IMM.VALUE that may get assigned with the constant. */
2624 static inline void
2625 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2626 aarch64_opnd_info *operand,
2627 int addr_off_p,
2628 int need_libopcodes_p,
2629 int skip_p)
2630 {
2631 if (reloc->exp.X_op == O_constant)
2632 {
2633 if (addr_off_p)
2634 operand->addr.offset.imm = reloc->exp.X_add_number;
2635 else
2636 operand->imm.value = reloc->exp.X_add_number;
2637 reloc->type = BFD_RELOC_UNUSED;
2638 }
2639 else
2640 {
2641 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2642 /* Tell libopcodes to ignore this operand or not. This is helpful
2643 when one of the operands needs to be fixed up later but we need
2644 libopcodes to check the other operands. */
2645 operand->skip = skip_p;
2646 }
2647 }
2648
2649 /* Relocation modifiers. Each entry in the table contains the textual
2650 name for the relocation which may be placed before a symbol used as
2651 a load/store offset, or add immediate. It must be surrounded by a
2652 leading and trailing colon, for example:
2653
2654 ldr x0, [x1, #:rello:varsym]
2655 add x0, x1, #:rello:varsym */
2656
struct reloc_table_entry
{
  /* Modifier name as written in the source, without the colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to use in each instruction context; a zero entry means
     the modifier has no relocation for that context.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2668
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR.
     The first entry is annotated with the field order used by all
     entries: adr, adrp, movw, add, ldst, ld_literal.  */
  {"lo12", 0,
   0,				/* adr_type */
   0,				/* adrp_type */
   0,				/* movw_type */
   BFD_RELOC_AARCH64_ADD_LO12,	/* add_type */
   BFD_RELOC_AARCH64_LDST_LO12,	/* ldst_type */
   0},				/* ld_literal_type */

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3197
3198 /* Given the address of a pointer pointing to the textual name of a
3199 relocation as may appear in assembler source, attempt to find its
3200 details in reloc_table. The pointer will be updated to the character
3201 after the trailing colon. On failure, NULL will be returned;
3202 otherwise return the reloc_table_entry. */
3203
3204 static struct reloc_table_entry *
3205 find_reloc_table_entry (char **str)
3206 {
3207 unsigned int i;
3208 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3209 {
3210 int length = strlen (reloc_table[i].name);
3211
3212 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3213 && (*str)[length] == ':')
3214 {
3215 *str += (length + 1);
3216 return &reloc_table[i];
3217 }
3218 }
3219
3220 return NULL;
3221 }
3222
3223 /* Returns 0 if the relocation should never be forced,
3224 1 if the relocation must be forced, and -1 if either
3225 result is OK. */
3226
static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* GOT and TLS relocations: always leave these for the linker
	 to resolve.  */
      return 1;

    default:
      /* No firm requirement either way; the caller decides.  */
      return -1;
    }
}
3325
3326 int
3327 aarch64_force_relocation (struct fix *fixp)
3328 {
3329 int res = aarch64_force_reloc (fixp->fx_r_type);
3330
3331 if (res == -1)
3332 return generic_force_reloc (fixp);
3333 return res;
3334 }
3335
3336 /* Mode argument to parse_shift and parser_shifter_operand. */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed at all  */
  SHIFTED_ARITH_IMM,		/* register form "rn{,lsl|lsr|asl|asr|uxt|sxt #n}"
				   or immediate form "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* register form "rn{,lsl|lsr|asl|asr|ror #n}"
				   or plain "#imm" (no shifted immediate)  */
  SHIFTED_LSL,			/* bare "lsl #n" only  */
  SHIFTED_MUL,			/* bare "mul #n" only  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiple)  */
  SHIFTED_REG_OFFSET		/* extend [su]xtw|sxtx {#n}, or lsl #n, in a
				   register-offset address  */
};
3350
3351 /* Parse a <shift> operator on an AArch64 data processing instruction.
3352 Return TRUE on success; otherwise return FALSE. */
3353 static bool
3354 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3355 {
3356 const struct aarch64_name_value_pair *shift_op;
3357 enum aarch64_modifier_kind kind;
3358 expressionS exp;
3359 int exp_has_prefix;
3360 char *s = *str;
3361 char *p = s;
3362
3363 for (p = *str; ISALPHA (*p); p++)
3364 ;
3365
3366 if (p == *str)
3367 {
3368 set_syntax_error (_("shift expression expected"));
3369 return false;
3370 }
3371
3372 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3373
3374 if (shift_op == NULL)
3375 {
3376 set_syntax_error (_("shift operator expected"));
3377 return false;
3378 }
3379
3380 kind = aarch64_get_operand_modifier (shift_op);
3381
3382 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3383 {
3384 set_syntax_error (_("invalid use of 'MSL'"));
3385 return false;
3386 }
3387
3388 if (kind == AARCH64_MOD_MUL
3389 && mode != SHIFTED_MUL
3390 && mode != SHIFTED_MUL_VL)
3391 {
3392 set_syntax_error (_("invalid use of 'MUL'"));
3393 return false;
3394 }
3395
3396 switch (mode)
3397 {
3398 case SHIFTED_LOGIC_IMM:
3399 if (aarch64_extend_operator_p (kind))
3400 {
3401 set_syntax_error (_("extending shift is not permitted"));
3402 return false;
3403 }
3404 break;
3405
3406 case SHIFTED_ARITH_IMM:
3407 if (kind == AARCH64_MOD_ROR)
3408 {
3409 set_syntax_error (_("'ROR' shift is not permitted"));
3410 return false;
3411 }
3412 break;
3413
3414 case SHIFTED_LSL:
3415 if (kind != AARCH64_MOD_LSL)
3416 {
3417 set_syntax_error (_("only 'LSL' shift is permitted"));
3418 return false;
3419 }
3420 break;
3421
3422 case SHIFTED_MUL:
3423 if (kind != AARCH64_MOD_MUL)
3424 {
3425 set_syntax_error (_("only 'MUL' is permitted"));
3426 return false;
3427 }
3428 break;
3429
3430 case SHIFTED_MUL_VL:
3431 /* "MUL VL" consists of two separate tokens. Require the first
3432 token to be "MUL" and look for a following "VL". */
3433 if (kind == AARCH64_MOD_MUL)
3434 {
3435 skip_whitespace (p);
3436 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3437 {
3438 p += 2;
3439 kind = AARCH64_MOD_MUL_VL;
3440 break;
3441 }
3442 }
3443 set_syntax_error (_("only 'MUL VL' is permitted"));
3444 return false;
3445
3446 case SHIFTED_REG_OFFSET:
3447 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3448 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3449 {
3450 set_fatal_syntax_error
3451 (_("invalid shift for the register offset addressing mode"));
3452 return false;
3453 }
3454 break;
3455
3456 case SHIFTED_LSL_MSL:
3457 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3458 {
3459 set_syntax_error (_("invalid shift operator"));
3460 return false;
3461 }
3462 break;
3463
3464 default:
3465 abort ();
3466 }
3467
3468 /* Whitespace can appear here if the next thing is a bare digit. */
3469 skip_whitespace (p);
3470
3471 /* Parse shift amount. */
3472 exp_has_prefix = 0;
3473 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3474 exp.X_op = O_absent;
3475 else
3476 {
3477 if (is_immediate_prefix (*p))
3478 {
3479 p++;
3480 exp_has_prefix = 1;
3481 }
3482 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3483 }
3484 if (kind == AARCH64_MOD_MUL_VL)
3485 /* For consistency, give MUL VL the same shift amount as an implicit
3486 MUL #1. */
3487 operand->shifter.amount = 1;
3488 else if (exp.X_op == O_absent)
3489 {
3490 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3491 {
3492 set_syntax_error (_("missing shift amount"));
3493 return false;
3494 }
3495 operand->shifter.amount = 0;
3496 }
3497 else if (exp.X_op != O_constant)
3498 {
3499 set_syntax_error (_("constant shift amount required"));
3500 return false;
3501 }
3502 /* For parsing purposes, MUL #n has no inherent range. The range
3503 depends on the operand and will be checked by operand-specific
3504 routines. */
3505 else if (kind != AARCH64_MOD_MUL
3506 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3507 {
3508 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3509 return false;
3510 }
3511 else
3512 {
3513 operand->shifter.amount = exp.X_add_number;
3514 operand->shifter.amount_present = 1;
3515 }
3516
3517 operand->shifter.operator_present = 1;
3518 operand->shifter.kind = kind;
3519
3520 *str = p;
3521 return true;
3522 }
3523
3524 /* Parse a <shifter_operand> for a data processing instruction:
3525
3526 #<immediate>
3527 #<immediate>, LSL #imm
3528
3529 Validation of immediate operands is deferred to md_apply_fix.
3530
3531 Return TRUE on success; otherwise return FALSE. */
3532
3533 static bool
3534 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3535 enum parse_shift_mode mode)
3536 {
3537 char *p;
3538
3539 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3540 return false;
3541
3542 p = *str;
3543
3544 /* Accept an immediate expression. */
3545 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3546 REJECT_ABSENT))
3547 return false;
3548
3549 /* Accept optional LSL for arithmetic immediate values. */
3550 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3551 if (! parse_shift (&p, operand, SHIFTED_LSL))
3552 return false;
3553
3554 /* Not accept any shifter for logical immediate values. */
3555 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3556 && parse_shift (&p, operand, mode))
3557 {
3558 set_syntax_error (_("unexpected shift operator"));
3559 return false;
3560 }
3561
3562 *str = p;
3563 return true;
3564 }
3565
3566 /* Parse a <shifter_operand> for a data processing instruction:
3567
3568 <Rm>
3569 <Rm>, <shift>
3570 #<immediate>
3571 #<immediate>, LSL #imm
3572
3573 where <shift> is handled by parse_shift above, and the last two
3574 cases are handled by the function above.
3575
3576 Validation of immediate operands is deferred to md_apply_fix.
3577
3578 Return TRUE on success; otherwise return FALSE. */
3579
3580 static bool
3581 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3582 enum parse_shift_mode mode)
3583 {
3584 const reg_entry *reg;
3585 aarch64_opnd_qualifier_t qualifier;
3586 enum aarch64_operand_class opd_class
3587 = aarch64_get_operand_class (operand->type);
3588
3589 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3590 if (reg)
3591 {
3592 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3593 {
3594 set_syntax_error (_("unexpected register in the immediate operand"));
3595 return false;
3596 }
3597
3598 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3599 {
3600 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3601 return false;
3602 }
3603
3604 operand->reg.regno = reg->number;
3605 operand->qualifier = qualifier;
3606
3607 /* Accept optional shift operation on register. */
3608 if (! skip_past_comma (str))
3609 return true;
3610
3611 if (! parse_shift (str, operand, mode))
3612 return false;
3613
3614 return true;
3615 }
3616 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3617 {
3618 set_syntax_error
3619 (_("integer register expected in the extended/shifted operand "
3620 "register"));
3621 return false;
3622 }
3623
3624 /* We have a shifted immediate variable. */
3625 return parse_shifter_operand_imm (str, operand, mode);
3626 }
3627
/* Parse a <shifter_operand> that may be preceded by a relocation
   modifier such as ":lo12:" (with or without a leading '#').
   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step past the "#:" or ":" prefix before the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* add_type == 0 means the table has no ADD-class relocation for
	 this modifier, so it cannot be used here.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3689
3690 /* Parse all forms of an address expression. Information is written
3691 to *OPERAND and/or inst.reloc.
3692
3693 The A64 instruction set has the following addressing modes:
3694
3695 Offset
3696 [base] // in SIMD ld/st structure
3697 [base{,#0}] // in ld/st exclusive
3698 [base{,#imm}]
3699 [base,Xm{,LSL #imm}]
3700 [base,Xm,SXTX {#imm}]
3701 [base,Wm,(S|U)XTW {#imm}]
3702 Pre-indexed
3703 [base]! // in ldraa/ldrab exclusive
3704 [base,#imm]!
3705 Post-indexed
3706 [base],#imm
3707 [base],Xm // in SIMD ld/st structure
3708 PC-relative (literal)
3709 label
3710 SVE:
3711 [base,#imm,MUL VL]
3712 [base,Zm.D{,LSL #imm}]
3713 [base,Zm.S,(S|U)XTW {#imm}]
3714 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3715 [Zn.S,#imm]
3716 [Zn.D,#imm]
3717 [Zn.S{, Xm}]
3718 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3719 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3720 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3721
3722 (As a convenience, the notation "=immediate" is permitted in conjunction
3723 with the pc-relative literal load instructions to automatically place an
3724 immediate value or symbolic address in a nearby literal pool and generate
3725 a hidden label which references it.)
3726
3727 Upon a successful parsing, the address structure in *OPERAND will be
3728 filled in the following way:
3729
3730 .base_regno = <base>
3731 .offset.is_reg // 1 if the offset is a register
3732 .offset.imm = <imm>
3733 .offset.regno = <Rm>
3734
3735 For different addressing modes defined in the A64 ISA:
3736
3737 Offset
3738 .pcrel=0; .preind=1; .postind=0; .writeback=0
3739 Pre-indexed
3740 .pcrel=0; .preind=1; .postind=0; .writeback=1
3741 Post-indexed
3742 .pcrel=0; .preind=0; .postind=1; .writeback=1
3743 PC-relative (literal)
3744 .pcrel=1; .preind=1; .postind=0; .writeback=0
3745
3746 The shift/extension information, if any, will be stored in .shifter.
3747 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3748 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3749 corresponding register.
3750
3751 BASE_TYPE says which types of base register should be accepted and
3752 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3753 is the type of shifter that is allowed for immediate offsets,
3754 or SHIFTED_NONE if none.
3755
3756 In all other respects, it is the caller's responsibility to check
3757 for addressing modes not supported by the instruction, and to set
3758 inst.reloc.type. */
3759
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form.  */
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr_type variant; everything else here is a
	     PC-relative literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base began with a letter, so the diagnostics
     below can distinguish an invalid register name from a missing one.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifts require a 64-bit offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset element sizes must agree, except for the
		 SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW extensions take a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset.  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* Post-index offset: either a 64-bit register or an expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Supply the implicit #0 offset.  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4066
4067 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4068 on success. */
4069 static bool
4070 parse_address (char **str, aarch64_opnd_info *operand)
4071 {
4072 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4073 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4074 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4075 }
4076
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE forms accept Z registers as base/offset and immediate offsets
     scaled by "MUL VL".  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4089
4090 /* Parse a register X0-X30. The register must be 64-bit and register 31
4091 is unallocated. */
4092 static bool
4093 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4094 {
4095 const reg_entry *reg = parse_reg (str);
4096 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4097 {
4098 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
4099 return false;
4100 }
4101 operand->reg.regno = reg->number;
4102 operand->qualifier = AARCH64_OPND_QLF_X;
4103 return true;
4104 }
4105
4106 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4107 Return TRUE on success; otherwise return FALSE. */
4108 static bool
4109 parse_half (char **str, int *internal_fixup_p)
4110 {
4111 char *p = *str;
4112
4113 skip_past_char (&p, '#');
4114
4115 gas_assert (internal_fixup_p);
4116 *internal_fixup_p = 0;
4117
4118 if (*p == ':')
4119 {
4120 struct reloc_table_entry *entry;
4121
4122 /* Try to parse a relocation. Anything else is an error. */
4123 ++p;
4124
4125 if (!(entry = find_reloc_table_entry (&p)))
4126 {
4127 set_syntax_error (_("unknown relocation modifier"));
4128 return false;
4129 }
4130
4131 if (entry->movw_type == 0)
4132 {
4133 set_syntax_error
4134 (_("this relocation modifier is not allowed on this instruction"));
4135 return false;
4136 }
4137
4138 inst.reloc.type = entry->movw_type;
4139 }
4140 else
4141 *internal_fixup_p = 1;
4142
4143 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4144 return false;
4145
4146 *str = p;
4147 return true;
4148 }
4149
4150 /* Parse an operand for an ADRP instruction:
4151 ADRP <Xd>, <label>
4152 Return TRUE on success; otherwise return FALSE. */
4153
4154 static bool
4155 parse_adrp (char **str)
4156 {
4157 char *p;
4158
4159 p = *str;
4160 if (*p == ':')
4161 {
4162 struct reloc_table_entry *entry;
4163
4164 /* Try to parse a relocation. Anything else is an error. */
4165 ++p;
4166 if (!(entry = find_reloc_table_entry (&p)))
4167 {
4168 set_syntax_error (_("unknown relocation modifier"));
4169 return false;
4170 }
4171
4172 if (entry->adrp_type == 0)
4173 {
4174 set_syntax_error
4175 (_("this relocation modifier is not allowed on this instruction"));
4176 return false;
4177 }
4178
4179 inst.reloc.type = entry->adrp_type;
4180 }
4181 else
4182 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4183
4184 inst.reloc.pc_rel = 1;
4185 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4186 return false;
4187 *str = p;
4188 return true;
4189 }
4190
4191 /* Miscellaneous. */
4192
4193 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4194 of SIZE tokens in which index I gives the token for field value I,
4195 or is null if field value I is invalid. REG_TYPE says which register
4196 names should be treated as registers rather than as symbolic immediates.
4197
4198 Return true on success, moving *STR past the operand and storing the
4199 field value in *VAL. */
4200
4201 static int
4202 parse_enum_string (char **str, int64_t *val, const char *const *array,
4203 size_t size, aarch64_reg_type reg_type)
4204 {
4205 expressionS exp;
4206 char *p, *q;
4207 size_t i;
4208
4209 /* Match C-like tokens. */
4210 p = q = *str;
4211 while (ISALNUM (*q))
4212 q++;
4213
4214 for (i = 0; i < size; ++i)
4215 if (array[i]
4216 && strncasecmp (array[i], p, q - p) == 0
4217 && array[i][q - p] == 0)
4218 {
4219 *val = i;
4220 *str = q;
4221 return true;
4222 }
4223
4224 if (!parse_immediate_expression (&p, &exp, reg_type))
4225 return false;
4226
4227 if (exp.X_op == O_constant
4228 && (uint64_t) exp.X_add_number < size)
4229 {
4230 *val = exp.X_add_number;
4231 *str = p;
4232 return true;
4233 }
4234
4235 /* Use the default error for this operand. */
4236 return false;
4237 }
4238
4239 /* Parse an option for a preload instruction. Returns the encoding for the
4240 option, or PARSE_FAIL. */
4241
4242 static int
4243 parse_pldop (char **str)
4244 {
4245 char *p, *q;
4246 const struct aarch64_name_value_pair *o;
4247
4248 p = q = *str;
4249 while (ISALNUM (*q))
4250 q++;
4251
4252 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4253 if (!o)
4254 return PARSE_FAIL;
4255
4256 *str = q;
4257 return o->value;
4258 }
4259
4260 /* Parse an option for a barrier instruction. Returns the encoding for the
4261 option, or PARSE_FAIL. */
4262
4263 static int
4264 parse_barrier (char **str)
4265 {
4266 char *p, *q;
4267 const struct aarch64_name_value_pair *o;
4268
4269 p = q = *str;
4270 while (ISALPHA (*q))
4271 q++;
4272
4273 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4274 if (!o)
4275 return PARSE_FAIL;
4276
4277 *str = q;
4278 return o->value;
4279 }
4280
4281 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4282 return 0 if successful. Otherwise return PARSE_FAIL. */
4283
4284 static int
4285 parse_barrier_psb (char **str,
4286 const struct aarch64_name_value_pair ** hint_opt)
4287 {
4288 char *p, *q;
4289 const struct aarch64_name_value_pair *o;
4290
4291 p = q = *str;
4292 while (ISALPHA (*q))
4293 q++;
4294
4295 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4296 if (!o)
4297 {
4298 set_fatal_syntax_error
4299 ( _("unknown or missing option to PSB/TSB"));
4300 return PARSE_FAIL;
4301 }
4302
4303 if (o->value != 0x11)
4304 {
4305 /* PSB only accepts option name 'CSYNC'. */
4306 set_syntax_error
4307 (_("the specified option is not accepted for PSB/TSB"));
4308 return PARSE_FAIL;
4309 }
4310
4311 *str = q;
4312 *hint_opt = o;
4313 return 0;
4314 }
4315
4316 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4317 return 0 if successful. Otherwise return PARSE_FAIL. */
4318
4319 static int
4320 parse_bti_operand (char **str,
4321 const struct aarch64_name_value_pair ** hint_opt)
4322 {
4323 char *p, *q;
4324 const struct aarch64_name_value_pair *o;
4325
4326 p = q = *str;
4327 while (ISALPHA (*q))
4328 q++;
4329
4330 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4331 if (!o)
4332 {
4333 set_fatal_syntax_error
4334 ( _("unknown option to BTI"));
4335 return PARSE_FAIL;
4336 }
4337
4338 switch (o->value)
4339 {
4340 /* Valid BTI operands. */
4341 case HINT_OPD_C:
4342 case HINT_OPD_J:
4343 case HINT_OPD_JC:
4344 break;
4345
4346 default:
4347 set_syntax_error
4348 (_("unknown option to BTI"));
4349 return PARSE_FAIL;
4350 }
4351
4352 *str = q;
4353 *hint_opt = o;
4354 return 0;
4355 }
4356
4357 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4358 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4359 on failure. Format:
4360
4361 REG_TYPE.QUALIFIER
4362
4363 Side effect: Update STR with current parse position of success.
4364 */
4365
4366 static const reg_entry *
4367 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4368 aarch64_opnd_qualifier_t *qualifier)
4369 {
4370 struct vector_type_el vectype;
4371 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4372 PTR_FULL_REG);
4373 if (!reg)
4374 return NULL;
4375
4376 if (vectype.type == NT_invtype)
4377 *qualifier = AARCH64_OPND_QLF_NIL;
4378 else
4379 {
4380 *qualifier = vectype_to_qualifier (&vectype);
4381 if (*qualifier == AARCH64_OPND_QLF_NIL)
4382 return NULL;
4383 }
4384
4385 return reg;
4386 }
4387
4388 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4389
4390 #<imm>
4391 <imm>
4392
4393 Function return TRUE if immediate was found, or FALSE.
4394 */
4395 static bool
4396 parse_sme_immediate (char **str, int64_t *imm)
4397 {
4398 int64_t val;
4399 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4400 return false;
4401
4402 *imm = val;
4403 return true;
4404 }
4405
4406 /* Parse index with selection register and immediate offset:
4407
4408 [<Wv>, <imm>]
4409 [<Wv>, #<imm>]
4410
4411 Return true on success, populating OPND with the parsed index. */
4412
4413 static bool
4414 parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
4415 {
4416 const reg_entry *reg;
4417
4418 if (!skip_past_char (str, '['))
4419 {
4420 set_syntax_error (_("expected '['"));
4421 return false;
4422 }
4423
4424 /* The selection register, encoded in the 2-bit Rv field. */
4425 reg = parse_reg (str);
4426 if (reg == NULL || reg->type != REG_TYPE_R_32)
4427 {
4428 set_syntax_error (_("expected a 32-bit selection register"));
4429 return false;
4430 }
4431 opnd->index.regno = reg->number;
4432
4433 if (!skip_past_char (str, ','))
4434 {
4435 set_syntax_error (_("missing immediate offset"));
4436 return false;
4437 }
4438
4439 if (!parse_sme_immediate (str, &opnd->index.imm))
4440 {
4441 set_syntax_error (_("expected a constant immediate offset"));
4442 return false;
4443 }
4444
4445 if (!skip_past_char (str, ']'))
4446 {
4447 set_syntax_error (_("expected ']'"));
4448 return false;
4449 }
4450
4451 return true;
4452 }
4453
4454 /* Parse a register of type REG_TYPE that might have an element type
4455 qualifier and that is indexed by two values: a 32-bit register,
4456 followed by an immediate. The ranges of the register and the
4457 immediate vary by opcode and are checked in libopcodes.
4458
4459 Return true on success, populating OPND with information about
4460 the operand and setting QUALIFIER to the register qualifier.
4461
4462 Field format examples:
4463
4464 <Pm>.<T>[<Wv>< #<imm>]
4465 ZA[<Wv>, #<imm>]
4466 <ZAn><HV>.<T>[<Wv>, #<imm>]
4467 */
4468 static bool
4469 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4470 struct aarch64_indexed_za *opnd,
4471 aarch64_opnd_qualifier_t *qualifier)
4472 {
4473 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier);
4474 if (!reg)
4475 return false;
4476
4477 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4478 opnd->regno = reg->number;
4479
4480 return parse_sme_za_index (str, opnd);
4481 }
4482
4483 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4484 operand. */
4485
4486 static bool
4487 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4488 struct aarch64_indexed_za *opnd,
4489 aarch64_opnd_qualifier_t *qualifier)
4490 {
4491 if (!skip_past_char (str, '{'))
4492 {
4493 set_syntax_error (_("expected '{'"));
4494 return false;
4495 }
4496
4497 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier))
4498 return false;
4499
4500 if (!skip_past_char (str, '}'))
4501 {
4502 set_syntax_error (_("expected '}'"));
4503 return false;
4504 }
4505
4506 return true;
4507 }
4508
4509 /* Parse list of up to eight 64-bit element tile names separated by commas in
4510 SME's ZERO instruction:
4511
4512 ZERO { <mask> }
4513
4514 Function returns <mask>:
4515
4516 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4517 */
4518 static int
4519 parse_sme_zero_mask(char **str)
4520 {
4521 char *q;
4522 int mask;
4523 aarch64_opnd_qualifier_t qualifier;
4524
4525 mask = 0x00;
4526 q = *str;
4527 do
4528 {
4529 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4530 &qualifier);
4531 if (!reg)
4532 return PARSE_FAIL;
4533
4534 if (reg->type == REG_TYPE_ZA)
4535 {
4536 if (qualifier != AARCH64_OPND_QLF_NIL)
4537 {
4538 set_syntax_error ("ZA should not have a size suffix");
4539 return PARSE_FAIL;
4540 }
4541 /* { ZA } is assembled as all-ones immediate. */
4542 mask = 0xff;
4543 }
4544 else
4545 {
4546 int regno = reg->number;
4547 if (qualifier == AARCH64_OPND_QLF_S_B)
4548 {
4549 /* { ZA0.B } is assembled as all-ones immediate. */
4550 mask = 0xff;
4551 }
4552 else if (qualifier == AARCH64_OPND_QLF_S_H)
4553 mask |= 0x55 << regno;
4554 else if (qualifier == AARCH64_OPND_QLF_S_S)
4555 mask |= 0x11 << regno;
4556 else if (qualifier == AARCH64_OPND_QLF_S_D)
4557 mask |= 0x01 << regno;
4558 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4559 {
4560 set_syntax_error (_("ZA tile masks do not operate at .Q"
4561 " granularity"));
4562 return PARSE_FAIL;
4563 }
4564 else if (qualifier == AARCH64_OPND_QLF_NIL)
4565 {
4566 set_syntax_error (_("missing ZA tile size"));
4567 return PARSE_FAIL;
4568 }
4569 else
4570 {
4571 set_syntax_error (_("invalid ZA tile"));
4572 return PARSE_FAIL;
4573 }
4574 }
4575 }
4576 while (skip_past_char (&q, ','));
4577
4578 *str = q;
4579 return mask;
4580 }
4581
4582 /* Wraps in curly braces <mask> operand ZERO instruction:
4583
4584 ZERO { <mask> }
4585
4586 Function returns value of <mask> bit-field.
4587 */
4588 static int
4589 parse_sme_list_of_64bit_tiles (char **str)
4590 {
4591 int regno;
4592
4593 if (!skip_past_char (str, '{'))
4594 {
4595 set_syntax_error (_("expected '{'"));
4596 return PARSE_FAIL;
4597 }
4598
4599 /* Empty <mask> list is an all-zeros immediate. */
4600 if (!skip_past_char (str, '}'))
4601 {
4602 regno = parse_sme_zero_mask (str);
4603 if (regno == PARSE_FAIL)
4604 return PARSE_FAIL;
4605
4606 if (!skip_past_char (str, '}'))
4607 {
4608 set_syntax_error (_("expected '}'"));
4609 return PARSE_FAIL;
4610 }
4611 }
4612 else
4613 regno = 0x00;
4614
4615 return regno;
4616 }
4617
4618 /* Parse streaming mode operand for SMSTART and SMSTOP.
4619
4620 {SM | ZA}
4621
4622 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4623 */
4624 static int
4625 parse_sme_sm_za (char **str)
4626 {
4627 char *p, *q;
4628
4629 p = q = *str;
4630 while (ISALPHA (*q))
4631 q++;
4632
4633 if ((q - p != 2)
4634 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4635 {
4636 set_syntax_error (_("expected SM or ZA operand"));
4637 return PARSE_FAIL;
4638 }
4639
4640 *str = q;
4641 return TOLOWER (p[0]);
4642 }
4643
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   On success *STR is advanced past the name and, if FLAGS is non-NULL,
   *FLAGS receives the register's flag bits (0 for an implementation
   defined register).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the name into BUF, lower-cased, stopping at the first character
     that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the encoding:
	     op0[15:14] op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses the selected processor does not support,
	 but still return the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4717
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  On success *STR is advanced past the name.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy the name into BUF, lower-cased, stopping at the first character
     that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose unsupported/deprecated names but still return the entry.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4755 \f
/* Operand-parsing helper macros.  All of them expect a local `char *str'
   cursor and a `failure' label in the enclosing function, and branch to
   `failure' when parsing fails.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into `reg', or fail with the
   default error.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse an integer or FP register of type REG_TYPE and record its
   number and inherent qualifier in `info', or fail.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into `val' with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val' and check it lies in
   [MIN, MAX], or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the named values in ARRAY into `val', or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4807 \f
/* Encode the 12-bit immediate field of an add/sub immediate
   instruction; the field starts at bit 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned imm12_lsb = 10;
  return imm << imm12_lsb;
}
4814
/* Encode the shift-amount field of an add/sub immediate instruction;
   the field starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned shift_lsb = 22;
  return cnt << shift_lsb;
}
4821
4822
/* Encode the immediate field of an ADR instruction: the low two bits
   of IMM go to immlo, the rest to immhi.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;	     /* [1:0] -> [30:29] */
  uint32_t immhi = (imm & 0x1ffffcu) << 3;   /* [20:2] -> [23:5] */
  return immlo | immhi;
}
4830
/* Encode the immediate field of a Move wide immediate instruction;
   the field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
4837
/* Encode the 26-bit offset of an unconditional branch; only the low
   26 bits of OFS are kept.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffffu;
}
4844
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the low 19 bits of OFS are placed at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffffu) << 5;
}
4851
/* Encode the 19-bit offset of a load-literal instruction; the low
   19 bits of OFS are placed at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffffu) << 5;
}
4858
/* Encode the 14-bit offset of a test & branch instruction; the low
   14 bits of OFS are placed at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fffu) << 5;
}
4865
/* Encode the 16-bit immediate field of svc/hvc/smc; the field starts
   at bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
4872
/* Reencode add(s) to sub(s), or sub(s) to add(s), by flipping the
   op bit (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4879
/* Turn a MOVZ/MOVN-family encoding into MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4885
/* Turn a MOVZ/MOVN-family encoding into MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4891
4892 /* Overall per-instruction processing. */
4893
4894 /* We need to be able to fix up arbitrary expressions in some statements.
4895 This is so that we can handle symbols that are an arbitrary distance from
4896 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4897 which returns part of an address in a form which will be valid for
4898 a data instruction. We do this by pushing the expression into a symbol
4899 in the expr_section, and creating a fix for that. */
4900
4901 static fixS *
4902 fix_new_aarch64 (fragS * frag,
4903 int where,
4904 short int size,
4905 expressionS * exp,
4906 int pc_rel,
4907 int reloc)
4908 {
4909 fixS *new_fix;
4910
4911 switch (exp->X_op)
4912 {
4913 case O_constant:
4914 case O_symbol:
4915 case O_add:
4916 case O_subtract:
4917 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4918 break;
4919
4920 default:
4921 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4922 pc_rel, reloc);
4923 break;
4924 }
4925 return new_fix;
4926 }
4927 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   Nonzero enables the multi-variant "did you mean this?" hints emitted
   by output_operand_error_record.  */
static int verbose_error_p = 1;
4933
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed
   by enum aarch64_operand_error_kind, so it must list every enumerator
   in declaration order; AARCH64_OPDE_UNTIED_IMMS and
   AARCH64_OPDE_UNTIED_OPERAND were previously missing, which skewed
   every name from that point on.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4951
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the numeric order of the enumerators; the asserts
     document (and verify) the expected ordering.
     NOTE(review): AARCH64_OPDE_UNTIED_IMMS and AARCH64_OPDE_UNTIED_OPERAND
     (used elsewhere in this file) are not covered by this chain -- confirm
     their intended position in the severity ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4975
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *end;

  /* Copy up to 31 bytes and assume that the full name is included.  */
  snprintf (mnemonic, sizeof (mnemonic), "%s", str);

  /* The mnemonic ends at the first character that cannot be part of a
     name: white space, '.', or end of string.  */
  for (end = mnemonic; is_part_of_name (*end); ++end)
    ;
  *end = '\0';

  /* Mark a truncated overlong name with a '...' suffix.  */
  if (end - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5004
5005 static void
5006 reset_aarch64_instruction (aarch64_instruction *instruction)
5007 {
5008 memset (instruction, '\0', sizeof (aarch64_instruction));
5009 instruction->reloc.type = BFD_RELOC_UNUSED;
5010 }
5011
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template this error is for.  */
  aarch64_operand_error detail;		/* The error details.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records with both head and tail pointers,
   so the whole list can be recycled cheaply between assembly lines.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from previous lines' reports.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5043
5044 /* Initialize the data structure that stores the operand mismatch
5045 information on assembling one line of the assembly code. */
5046 static void
5047 init_operand_error_report (void)
5048 {
5049 if (operand_error_report.head != NULL)
5050 {
5051 gas_assert (operand_error_report.tail != NULL);
5052 operand_error_report.tail->next = free_opnd_error_record_nodes;
5053 free_opnd_error_record_nodes = operand_error_report.head;
5054 operand_error_report.head = NULL;
5055 operand_error_report.tail = NULL;
5056 return;
5057 }
5058 gas_assert (operand_error_report.tail == NULL);
5059 }
5060
5061 /* Return TRUE if some operand error has been recorded during the
5062 parsing of the current assembly line using the opcode *OPCODE;
5063 otherwise return FALSE. */
5064 static inline bool
5065 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5066 {
5067 operand_error_record *record = operand_error_report.head;
5068 return record && record->opcode == opcode;
5069 }
5070
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a freshly inserted record or an existing one that the new
     error should supersede: store the new details.  */
  record->detail = new_record->detail;
}
5122
5123 static inline void
5124 record_operand_error_info (const aarch64_opcode *opcode,
5125 aarch64_operand_error *error_info)
5126 {
5127 operand_error_record record;
5128 record.opcode = opcode;
5129 record.detail = *error_info;
5130 add_operand_error_record (&record);
5131 }
5132
5133 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5134 error message *ERROR, for operand IDX (count from 0). */
5135
5136 static void
5137 record_operand_error (const aarch64_opcode *opcode, int idx,
5138 enum aarch64_operand_error_kind kind,
5139 const char* error)
5140 {
5141 aarch64_operand_error info;
5142 memset(&info, 0, sizeof (info));
5143 info.index = idx;
5144 info.kind = kind;
5145 info.error = error;
5146 info.non_fatal = false;
5147 record_operand_error_info (opcode, &info);
5148 }
5149
5150 static void
5151 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5152 enum aarch64_operand_error_kind kind,
5153 const char* error, const int *extra_data)
5154 {
5155 aarch64_operand_error info;
5156 info.index = idx;
5157 info.kind = kind;
5158 info.error = error;
5159 info.data[0].i = extra_data[0];
5160 info.data[1].i = extra_data[1];
5161 info.data[2].i = extra_data[2];
5162 info.non_fatal = false;
5163 record_operand_error_info (opcode, &info);
5164 }
5165
5166 static void
5167 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5168 const char* error, int lower_bound,
5169 int upper_bound)
5170 {
5171 int data[3] = {lower_bound, upper_bound, 0};
5172 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5173 error, data);
5174 }
5175
5176 /* Remove the operand error record for *OPCODE. */
5177 static void ATTRIBUTE_UNUSED
5178 remove_operand_error_record (const aarch64_opcode *opcode)
5179 {
5180 if (opcode_has_operand_error_p (opcode))
5181 {
5182 operand_error_record* record = operand_error_report.head;
5183 gas_assert (record != NULL && operand_error_report.tail != NULL);
5184 operand_error_report.head = record->next;
5185 record->next = free_opnd_error_record_nodes;
5186 free_opnd_error_record_nodes = record;
5187 if (operand_error_report.head == NULL)
5188 {
5189 gas_assert (operand_error_report.tail == record);
5190 operand_error_report.tail = NULL;
5191 }
5192 }
5193 }
5194
5195 /* Given the instruction in *INSTR, return the index of the best matched
5196 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5197
5198 Return -1 if there is no qualifier sequence; return the first match
5199 if there is multiple matches found. */
5200
5201 static int
5202 find_best_match (const aarch64_inst *instr,
5203 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5204 {
5205 int i, num_opnds, max_num_matched, idx;
5206
5207 num_opnds = aarch64_num_of_operands (instr->opcode);
5208 if (num_opnds == 0)
5209 {
5210 DEBUG_TRACE ("no operand");
5211 return -1;
5212 }
5213
5214 max_num_matched = 0;
5215 idx = 0;
5216
5217 /* For each pattern. */
5218 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5219 {
5220 int j, num_matched;
5221 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5222
5223 /* Most opcodes has much fewer patterns in the list. */
5224 if (empty_qualifier_sequence_p (qualifiers))
5225 {
5226 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5227 break;
5228 }
5229
5230 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5231 if (*qualifiers == instr->operands[j].qualifier)
5232 ++num_matched;
5233
5234 if (num_matched > max_num_matched)
5235 {
5236 max_num_matched = num_matched;
5237 idx = i;
5238 }
5239 }
5240
5241 DEBUG_TRACE ("return with %d", idx);
5242 return idx;
5243 }
5244
5245 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5246 corresponding operands in *INSTR. */
5247
5248 static inline void
5249 assign_qualifier_sequence (aarch64_inst *instr,
5250 const aarch64_opnd_qualifier_t *qualifiers)
5251 {
5252 int i = 0;
5253 int num_opnds = aarch64_num_of_operands (instr->opcode);
5254 gas_assert (num_opnds);
5255 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5256 instr->operands[i].qualifier = *qualifiers;
5257 }
5258
5259 /* Callback used by aarch64_print_operand to apply STYLE to the
5260 disassembler output created from FMT and ARGS. The STYLER object holds
5261 any required state. Must return a pointer to a string (created from FMT
5262 and ARGS) that will continue to be valid until the complete disassembled
5263 instruction has been printed.
5264
5265 We don't currently add any styling to the output of the disassembler as
5266 used within assembler error messages, and so STYLE is ignored here. A
5267 new string is allocated on the obstack help within STYLER and returned
5268 to the caller. */
5269
5270 static const char *aarch64_apply_style
5271 (struct aarch64_styler *styler,
5272 enum disassembler_style style ATTRIBUTE_UNUSED,
5273 const char *fmt, va_list args)
5274 {
5275 int res;
5276 char *ptr;
5277 struct obstack *stack = (struct obstack *) styler->state;
5278 va_list ap;
5279
5280 /* Calculate the required space. */
5281 va_copy (ap, args);
5282 res = vsnprintf (NULL, 0, fmt, ap);
5283 va_end (ap);
5284 gas_assert (res >= 0);
5285
5286 /* Allocate space on the obstack and format the result. */
5287 ptr = (char *) obstack_alloc (stack, res + 1);
5288 res = vsnprintf (ptr, (res + 1), fmt, args);
5289 gas_assert (res >= 0);
5290
5291 return ptr;
5292 }
5293
/* Print operands for the diagnosis purpose: append to BUF (which the
   caller has already seeded with the mnemonic) the textual form of the
   operands OPNDS according to the operand info in OPCODE.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styling strings live on CONTENT until the whole line is printed.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5345
/* Send to stderr a string as information, prefixed with the current
   file name and line number when they are known.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fputs (_("Info: "), stderr);
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  putc ('\n', stderr);
}
5369
/* Output one operand error record, dispatching on the error kind.  STR is
   the original assembly line, quoted in every diagnostic.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand template the error refers to, if the operand index is known.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal records are reported as warnings, others as errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* Equal bounds mean exactly one value is permitted.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5565
5566 /* Process and output the error message about the operand mismatching.
5567
5568 When this function is called, the operand error information had
5569 been collected for an assembly line and there will be multiple
5570 errors in the case of multiple instruction templates; output the
5571 error message that most closely describes the problem.
5572
5573 The errors to be printed can be filtered on printing all errors
5574 or only non-fatal errors. This distinction has to be made because
5575 the error buffer may already be filled with fatal errors we don't want to
5576 print due to the different instruction templates. */
5577
5578 static void
5579 output_operand_error_report (char *str, bool non_fatal_only)
5580 {
5581 int largest_error_pos;
5582 const char *msg = NULL;
5583 enum aarch64_operand_error_kind kind;
5584 operand_error_record *curr;
5585 operand_error_record *head = operand_error_report.head;
5586 operand_error_record *record = NULL;
5587
5588 /* No error to report. */
5589 if (head == NULL)
5590 return;
5591
5592 gas_assert (head != NULL && operand_error_report.tail != NULL);
5593
5594 /* Only one error. */
5595 if (head == operand_error_report.tail)
5596 {
5597 /* If the only error is a non-fatal one and we don't want to print it,
5598 just exit. */
5599 if (!non_fatal_only || head->detail.non_fatal)
5600 {
5601 DEBUG_TRACE ("single opcode entry with error kind: %s",
5602 operand_mismatch_kind_names[head->detail.kind]);
5603 output_operand_error_record (head, str);
5604 }
5605 return;
5606 }
5607
5608 /* Find the error kind of the highest severity. */
5609 DEBUG_TRACE ("multiple opcode entries with error kind");
5610 kind = AARCH64_OPDE_NIL;
5611 for (curr = head; curr != NULL; curr = curr->next)
5612 {
5613 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5614 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5615 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5616 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5617 kind = curr->detail.kind;
5618 }
5619
5620 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5621
5622 /* Pick up one of errors of KIND to report. */
5623 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5624 for (curr = head; curr != NULL; curr = curr->next)
5625 {
5626 /* If we don't want to print non-fatal errors then don't consider them
5627 at all. */
5628 if (curr->detail.kind != kind
5629 || (non_fatal_only && !curr->detail.non_fatal))
5630 continue;
5631 /* If there are multiple errors, pick up the one with the highest
5632 mismatching operand index. In the case of multiple errors with
5633 the equally highest operand index, pick up the first one or the
5634 first one with non-NULL error message. */
5635 if (curr->detail.index > largest_error_pos
5636 || (curr->detail.index == largest_error_pos && msg == NULL
5637 && curr->detail.error != NULL))
5638 {
5639 largest_error_pos = curr->detail.index;
5640 record = curr;
5641 msg = record->detail.error;
5642 }
5643 }
5644
5645 /* The way errors are collected in the back-end is a bit non-intuitive. But
5646 essentially, because each operand template is tried recursively you may
5647 always have errors collected from the previous tried OPND. These are
5648 usually skipped if there is one successful match. However now with the
5649 non-fatal errors we have to ignore those previously collected hard errors
5650 when we're only interested in printing the non-fatal ones. This condition
5651 prevents us from printing errors that are not appropriate, since we did
5652 match a condition, but it also has warnings that it wants to print. */
5653 if (non_fatal_only && !record)
5654 return;
5655
5656 gas_assert (largest_error_pos != -2 && record != NULL);
5657 DEBUG_TRACE ("Pick up error kind %s to report",
5658 operand_mismatch_kind_names[record->detail.kind]);
5659
5660 /* Output. */
5661 output_operand_error_record (record, str);
5662 }
5663 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first, regardless of host
     endianness.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5674
/* Read an AARCH64 instruction from buf - always little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in most-significant first; accumulating in a uint32_t
     keeps the top byte's shift in unsigned arithmetic.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5684
5685 static void
5686 output_inst (struct aarch64_inst *new_inst)
5687 {
5688 char *to = NULL;
5689
5690 to = frag_more (INSN_SIZE);
5691
5692 frag_now->tc_frag_data.recorded = 1;
5693
5694 put_aarch64_insn (to, inst.base.value);
5695
5696 if (inst.reloc.type != BFD_RELOC_UNUSED)
5697 {
5698 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5699 INSN_SIZE, &inst.reloc.exp,
5700 inst.reloc.pc_rel,
5701 inst.reloc.type);
5702 DEBUG_TRACE ("Prepared relocation fix up");
5703 /* Don't check the addend value against the instruction size,
5704 that's the job of our code in md_apply_fix(). */
5705 fixp->fx_no_overflow = 1;
5706 if (new_inst != NULL)
5707 fixp->tc_fix_data.inst = new_inst;
5708 if (aarch64_gas_internal_fixup_p ())
5709 {
5710 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5711 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5712 fixp->fx_addnumber = inst.reloc.flags;
5713 }
5714 }
5715
5716 dwarf2_emit_insn (INSN_SIZE);
5717 }
5718
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry carrying this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry in the chain of opcodes sharing the same name.  */
  struct templates *next;
};

typedef struct templates templates;
5728
5729 static templates *
5730 lookup_mnemonic (const char *start, int len)
5731 {
5732 templates *templ = NULL;
5733
5734 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5735 return templ;
5736 }
5737
5738 /* Subroutine of md_assemble, responsible for looking up the primary
5739 opcode from the mnemonic the user wrote. BASE points to the beginning
5740 of the mnemonic, DOT points to the first '.' within the mnemonic
5741 (if any) and END points to the end of the mnemonic. */
5742
5743 static templates *
5744 opcode_lookup (char *base, char *dot, char *end)
5745 {
5746 const aarch64_cond *cond;
5747 char condname[16];
5748 int len;
5749
5750 if (dot == end)
5751 return 0;
5752
5753 inst.cond = COND_ALWAYS;
5754
5755 /* Handle a possible condition. */
5756 if (dot)
5757 {
5758 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5759 if (!cond)
5760 return 0;
5761 inst.cond = cond->value;
5762 len = dot - base;
5763 }
5764 else
5765 len = end - base;
5766
5767 if (inst.cond == COND_ALWAYS)
5768 {
5769 /* Look for unaffixed mnemonic. */
5770 return lookup_mnemonic (base, len);
5771 }
5772 else if (len <= 13)
5773 {
5774 /* append ".c" to mnemonic if conditional */
5775 memcpy (condname, base, len);
5776 memcpy (condname + len, ".c", 2);
5777 base = condname;
5778 len += 2;
5779 return lookup_mnemonic (base, len);
5780 }
5781
5782 return NULL;
5783 }
5784
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The opcode table supplies one encoded default for its optional
     operand; which field of *OPERAND it populates depends on TYPE.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Integer, FP and SIMD register operands: the default is a register
       number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector element operands: the default is a register-with-lane
       number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is an immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted SVE pattern multiplier behaves as "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate needs no relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option operands default to an entry in their option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5883
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A W-register destination restricts which shift amounts (and hence
     which relocation groups) are acceptable.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation types are rejected outright for MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Derive the implicit shift amount from the relocation group (G0 = bits
     0-15, G1 = bits 16-31, and so on).  G2/G3 groups only make sense for
     64-bit destinations.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5985
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; assert (and return -1) for any other value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] is log2 (SIZE); 0xff marks sizes that are not a
     supported power of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as out-of-range values: indexing ls[-1]
     below would be an out-of-bounds read (undefined behaviour).  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6001
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
   Return BFD_RELOC_AARCH64_NONE when the access size implied by the
   operand qualifier is too large for the requested relocation.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (in the order asserted below),
     columns by log2 of the access size (8, 16, 32, 64, 128 bits).  The
     TLS rows have no 128-bit variant, hence BFD_RELOC_AARCH64_NONE.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand carries no qualifier of its own, derive one
     from the qualifier of operand 0 via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS pseudo relocs have no 128-bit (logsz == 4) column.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6089
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO packs the count minus one in its low 2 bits and the register
   numbers in successive 5-bit fields above them.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = (reginfo & 0x3) + 1;
  uint32_t stride = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  /* Each subsequent register must be the previous one plus STRIDE,
     wrapping around at 32.  */
  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      expected = (expected + stride) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6120
6121 /* Generic instruction operand parser. This does no encoding and no
6122 semantic validation; it merely squirrels values away in the inst
6123 structure. Returns TRUE or FALSE depending on whether the
6124 specified grammar matched. */
6125
6126 static bool
6127 parse_operands (char *str, const aarch64_opcode *opcode)
6128 {
6129 int i;
6130 char *backtrack_pos = 0;
6131 const enum aarch64_opnd *operands = opcode->operands;
6132 aarch64_reg_type imm_reg_type;
6133
6134 clear_error ();
6135 skip_whitespace (str);
6136
6137 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6138 AARCH64_FEATURE_SVE
6139 | AARCH64_FEATURE_SVE2))
6140 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6141 else
6142 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6143
6144 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6145 {
6146 int64_t val;
6147 const reg_entry *reg;
6148 int comma_skipped_p = 0;
6149 struct vector_type_el vectype;
6150 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6151 aarch64_opnd_info *info = &inst.base.operands[i];
6152 aarch64_reg_type reg_type;
6153
6154 DEBUG_TRACE ("parse operand %d", i);
6155
6156 /* Assign the operand code. */
6157 info->type = operands[i];
6158
6159 if (optional_operand_p (opcode, i))
6160 {
6161 /* Remember where we are in case we need to backtrack. */
6162 gas_assert (!backtrack_pos);
6163 backtrack_pos = str;
6164 }
6165
6166 /* Expect comma between operands; the backtrack mechanism will take
6167 care of cases of omitted optional operand. */
6168 if (i > 0 && ! skip_past_char (&str, ','))
6169 {
6170 set_syntax_error (_("comma expected between operands"));
6171 goto failure;
6172 }
6173 else
6174 comma_skipped_p = 1;
6175
6176 switch (operands[i])
6177 {
6178 case AARCH64_OPND_Rd:
6179 case AARCH64_OPND_Rn:
6180 case AARCH64_OPND_Rm:
6181 case AARCH64_OPND_Rt:
6182 case AARCH64_OPND_Rt2:
6183 case AARCH64_OPND_Rs:
6184 case AARCH64_OPND_Ra:
6185 case AARCH64_OPND_Rt_LS64:
6186 case AARCH64_OPND_Rt_SYS:
6187 case AARCH64_OPND_PAIRREG:
6188 case AARCH64_OPND_SVE_Rm:
6189 po_int_fp_reg_or_fail (REG_TYPE_R_Z);
6190
6191 /* In LS64 load/store instructions Rt register number must be even
6192 and <=22. */
6193 if (operands[i] == AARCH64_OPND_Rt_LS64)
6194 {
6195 /* We've already checked if this is valid register.
6196 This will check if register number (Rt) is not undefined for LS64
6197 instructions:
6198 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6199 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6200 {
6201 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6202 goto failure;
6203 }
6204 }
6205 break;
6206
6207 case AARCH64_OPND_Rd_SP:
6208 case AARCH64_OPND_Rn_SP:
6209 case AARCH64_OPND_Rt_SP:
6210 case AARCH64_OPND_SVE_Rn_SP:
6211 case AARCH64_OPND_Rm_SP:
6212 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6213 break;
6214
6215 case AARCH64_OPND_Rm_EXT:
6216 case AARCH64_OPND_Rm_SFT:
6217 po_misc_or_fail (parse_shifter_operand
6218 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6219 ? SHIFTED_ARITH_IMM
6220 : SHIFTED_LOGIC_IMM)));
6221 if (!info->shifter.operator_present)
6222 {
6223 /* Default to LSL if not present. Libopcodes prefers shifter
6224 kind to be explicit. */
6225 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6226 info->shifter.kind = AARCH64_MOD_LSL;
6227 /* For Rm_EXT, libopcodes will carry out further check on whether
6228 or not stack pointer is used in the instruction (Recall that
6229 "the extend operator is not optional unless at least one of
6230 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6231 }
6232 break;
6233
6234 case AARCH64_OPND_Fd:
6235 case AARCH64_OPND_Fn:
6236 case AARCH64_OPND_Fm:
6237 case AARCH64_OPND_Fa:
6238 case AARCH64_OPND_Ft:
6239 case AARCH64_OPND_Ft2:
6240 case AARCH64_OPND_Sd:
6241 case AARCH64_OPND_Sn:
6242 case AARCH64_OPND_Sm:
6243 case AARCH64_OPND_SVE_VZn:
6244 case AARCH64_OPND_SVE_Vd:
6245 case AARCH64_OPND_SVE_Vm:
6246 case AARCH64_OPND_SVE_Vn:
6247 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6248 break;
6249
6250 case AARCH64_OPND_SVE_Pd:
6251 case AARCH64_OPND_SVE_Pg3:
6252 case AARCH64_OPND_SVE_Pg4_5:
6253 case AARCH64_OPND_SVE_Pg4_10:
6254 case AARCH64_OPND_SVE_Pg4_16:
6255 case AARCH64_OPND_SVE_Pm:
6256 case AARCH64_OPND_SVE_Pn:
6257 case AARCH64_OPND_SVE_Pt:
6258 case AARCH64_OPND_SME_Pm:
6259 reg_type = REG_TYPE_PN;
6260 goto vector_reg;
6261
6262 case AARCH64_OPND_SVE_Za_5:
6263 case AARCH64_OPND_SVE_Za_16:
6264 case AARCH64_OPND_SVE_Zd:
6265 case AARCH64_OPND_SVE_Zm_5:
6266 case AARCH64_OPND_SVE_Zm_16:
6267 case AARCH64_OPND_SVE_Zn:
6268 case AARCH64_OPND_SVE_Zt:
6269 reg_type = REG_TYPE_ZN;
6270 goto vector_reg;
6271
6272 case AARCH64_OPND_Va:
6273 case AARCH64_OPND_Vd:
6274 case AARCH64_OPND_Vn:
6275 case AARCH64_OPND_Vm:
6276 reg_type = REG_TYPE_VN;
6277 vector_reg:
6278 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6279 if (!reg)
6280 {
6281 first_error (_(get_reg_expected_msg (reg_type)));
6282 goto failure;
6283 }
6284 if (vectype.defined & NTA_HASINDEX)
6285 goto failure;
6286
6287 info->reg.regno = reg->number;
6288 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6289 && vectype.type == NT_invtype)
6290 /* Unqualified Pn and Zn registers are allowed in certain
6291 contexts. Rely on F_STRICT qualifier checking to catch
6292 invalid uses. */
6293 info->qualifier = AARCH64_OPND_QLF_NIL;
6294 else
6295 {
6296 info->qualifier = vectype_to_qualifier (&vectype);
6297 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6298 goto failure;
6299 }
6300 break;
6301
6302 case AARCH64_OPND_VdD1:
6303 case AARCH64_OPND_VnD1:
6304 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6305 if (!reg)
6306 {
6307 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6308 goto failure;
6309 }
6310 if (vectype.type != NT_d || vectype.index != 1)
6311 {
6312 set_fatal_syntax_error
6313 (_("the top half of a 128-bit FP/SIMD register is expected"));
6314 goto failure;
6315 }
6316 info->reg.regno = reg->number;
6317 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6318 here; it is correct for the purpose of encoding/decoding since
6319 only the register number is explicitly encoded in the related
6320 instructions, although this appears a bit hacky. */
6321 info->qualifier = AARCH64_OPND_QLF_S_D;
6322 break;
6323
6324 case AARCH64_OPND_SVE_Zm3_INDEX:
6325 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6326 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6327 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6328 case AARCH64_OPND_SVE_Zm4_INDEX:
6329 case AARCH64_OPND_SVE_Zn_INDEX:
6330 reg_type = REG_TYPE_ZN;
6331 goto vector_reg_index;
6332
6333 case AARCH64_OPND_Ed:
6334 case AARCH64_OPND_En:
6335 case AARCH64_OPND_Em:
6336 case AARCH64_OPND_Em16:
6337 case AARCH64_OPND_SM3_IMM2:
6338 reg_type = REG_TYPE_VN;
6339 vector_reg_index:
6340 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6341 if (!reg)
6342 {
6343 first_error (_(get_reg_expected_msg (reg_type)));
6344 goto failure;
6345 }
6346 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6347 goto failure;
6348
6349 info->reglane.regno = reg->number;
6350 info->reglane.index = vectype.index;
6351 info->qualifier = vectype_to_qualifier (&vectype);
6352 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6353 goto failure;
6354 break;
6355
6356 case AARCH64_OPND_SVE_ZnxN:
6357 case AARCH64_OPND_SVE_ZtxN:
6358 reg_type = REG_TYPE_ZN;
6359 goto vector_reg_list;
6360
6361 case AARCH64_OPND_LVn:
6362 case AARCH64_OPND_LVt:
6363 case AARCH64_OPND_LVt_AL:
6364 case AARCH64_OPND_LEt:
6365 reg_type = REG_TYPE_VN;
6366 vector_reg_list:
6367 if (reg_type == REG_TYPE_ZN
6368 && get_opcode_dependent_value (opcode) == 1
6369 && *str != '{')
6370 {
6371 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6372 if (!reg)
6373 {
6374 first_error (_(get_reg_expected_msg (reg_type)));
6375 goto failure;
6376 }
6377 info->reglist.first_regno = reg->number;
6378 info->reglist.num_regs = 1;
6379 }
6380 else
6381 {
6382 val = parse_vector_reg_list (&str, reg_type, &vectype);
6383 if (val == PARSE_FAIL)
6384 goto failure;
6385
6386 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6387 {
6388 set_fatal_syntax_error (_("invalid register list"));
6389 goto failure;
6390 }
6391
6392 if (vectype.width != 0 && *str != ',')
6393 {
6394 set_fatal_syntax_error
6395 (_("expected element type rather than vector type"));
6396 goto failure;
6397 }
6398
6399 info->reglist.first_regno = (val >> 2) & 0x1f;
6400 info->reglist.num_regs = (val & 0x3) + 1;
6401 }
6402 if (operands[i] == AARCH64_OPND_LEt)
6403 {
6404 if (!(vectype.defined & NTA_HASINDEX))
6405 goto failure;
6406 info->reglist.has_index = 1;
6407 info->reglist.index = vectype.index;
6408 }
6409 else
6410 {
6411 if (vectype.defined & NTA_HASINDEX)
6412 goto failure;
6413 if (!(vectype.defined & NTA_HASTYPE))
6414 {
6415 if (reg_type == REG_TYPE_ZN)
6416 set_fatal_syntax_error (_("missing type suffix"));
6417 goto failure;
6418 }
6419 }
6420 info->qualifier = vectype_to_qualifier (&vectype);
6421 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6422 goto failure;
6423 break;
6424
6425 case AARCH64_OPND_CRn:
6426 case AARCH64_OPND_CRm:
6427 {
6428 char prefix = *(str++);
6429 if (prefix != 'c' && prefix != 'C')
6430 goto failure;
6431
6432 po_imm_nc_or_fail ();
6433 if (val > 15)
6434 {
6435 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6436 goto failure;
6437 }
6438 info->qualifier = AARCH64_OPND_QLF_CR;
6439 info->imm.value = val;
6440 break;
6441 }
6442
6443 case AARCH64_OPND_SHLL_IMM:
6444 case AARCH64_OPND_IMM_VLSR:
6445 po_imm_or_fail (1, 64);
6446 info->imm.value = val;
6447 break;
6448
6449 case AARCH64_OPND_CCMP_IMM:
6450 case AARCH64_OPND_SIMM5:
6451 case AARCH64_OPND_FBITS:
6452 case AARCH64_OPND_TME_UIMM16:
6453 case AARCH64_OPND_UIMM4:
6454 case AARCH64_OPND_UIMM4_ADDG:
6455 case AARCH64_OPND_UIMM10:
6456 case AARCH64_OPND_UIMM3_OP1:
6457 case AARCH64_OPND_UIMM3_OP2:
6458 case AARCH64_OPND_IMM_VLSL:
6459 case AARCH64_OPND_IMM:
6460 case AARCH64_OPND_IMM_2:
6461 case AARCH64_OPND_WIDTH:
6462 case AARCH64_OPND_SVE_INV_LIMM:
6463 case AARCH64_OPND_SVE_LIMM:
6464 case AARCH64_OPND_SVE_LIMM_MOV:
6465 case AARCH64_OPND_SVE_SHLIMM_PRED:
6466 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6467 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6468 case AARCH64_OPND_SVE_SHRIMM_PRED:
6469 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6470 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6471 case AARCH64_OPND_SVE_SIMM5:
6472 case AARCH64_OPND_SVE_SIMM5B:
6473 case AARCH64_OPND_SVE_SIMM6:
6474 case AARCH64_OPND_SVE_SIMM8:
6475 case AARCH64_OPND_SVE_UIMM3:
6476 case AARCH64_OPND_SVE_UIMM7:
6477 case AARCH64_OPND_SVE_UIMM8:
6478 case AARCH64_OPND_SVE_UIMM8_53:
6479 case AARCH64_OPND_IMM_ROT1:
6480 case AARCH64_OPND_IMM_ROT2:
6481 case AARCH64_OPND_IMM_ROT3:
6482 case AARCH64_OPND_SVE_IMM_ROT1:
6483 case AARCH64_OPND_SVE_IMM_ROT2:
6484 case AARCH64_OPND_SVE_IMM_ROT3:
6485 case AARCH64_OPND_CSSC_SIMM8:
6486 case AARCH64_OPND_CSSC_UIMM8:
6487 po_imm_nc_or_fail ();
6488 info->imm.value = val;
6489 break;
6490
6491 case AARCH64_OPND_SVE_AIMM:
6492 case AARCH64_OPND_SVE_ASIMM:
6493 po_imm_nc_or_fail ();
6494 info->imm.value = val;
6495 skip_whitespace (str);
6496 if (skip_past_comma (&str))
6497 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6498 else
6499 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6500 break;
6501
6502 case AARCH64_OPND_SVE_PATTERN:
6503 po_enum_or_fail (aarch64_sve_pattern_array);
6504 info->imm.value = val;
6505 break;
6506
6507 case AARCH64_OPND_SVE_PATTERN_SCALED:
6508 po_enum_or_fail (aarch64_sve_pattern_array);
6509 info->imm.value = val;
6510 if (skip_past_comma (&str)
6511 && !parse_shift (&str, info, SHIFTED_MUL))
6512 goto failure;
6513 if (!info->shifter.operator_present)
6514 {
6515 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6516 info->shifter.kind = AARCH64_MOD_MUL;
6517 info->shifter.amount = 1;
6518 }
6519 break;
6520
6521 case AARCH64_OPND_SVE_PRFOP:
6522 po_enum_or_fail (aarch64_sve_prfop_array);
6523 info->imm.value = val;
6524 break;
6525
6526 case AARCH64_OPND_UIMM7:
6527 po_imm_or_fail (0, 127);
6528 info->imm.value = val;
6529 break;
6530
6531 case AARCH64_OPND_IDX:
6532 case AARCH64_OPND_MASK:
6533 case AARCH64_OPND_BIT_NUM:
6534 case AARCH64_OPND_IMMR:
6535 case AARCH64_OPND_IMMS:
6536 po_imm_or_fail (0, 63);
6537 info->imm.value = val;
6538 break;
6539
6540 case AARCH64_OPND_IMM0:
6541 po_imm_nc_or_fail ();
6542 if (val != 0)
6543 {
6544 set_fatal_syntax_error (_("immediate zero expected"));
6545 goto failure;
6546 }
6547 info->imm.value = 0;
6548 break;
6549
6550 case AARCH64_OPND_FPIMM0:
6551 {
6552 int qfloat;
6553 bool res1 = false, res2 = false;
6554 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6555 it is probably not worth the effort to support it. */
6556 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6557 imm_reg_type))
6558 && (error_p ()
6559 || !(res2 = parse_constant_immediate (&str, &val,
6560 imm_reg_type))))
6561 goto failure;
6562 if ((res1 && qfloat == 0) || (res2 && val == 0))
6563 {
6564 info->imm.value = 0;
6565 info->imm.is_fp = 1;
6566 break;
6567 }
6568 set_fatal_syntax_error (_("immediate zero expected"));
6569 goto failure;
6570 }
6571
6572 case AARCH64_OPND_IMM_MOV:
6573 {
6574 char *saved = str;
6575 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6576 reg_name_p (str, REG_TYPE_VN))
6577 goto failure;
6578 str = saved;
6579 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6580 GE_OPT_PREFIX, REJECT_ABSENT));
6581 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6582 later. fix_mov_imm_insn will try to determine a machine
6583 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6584 message if the immediate cannot be moved by a single
6585 instruction. */
6586 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6587 inst.base.operands[i].skip = 1;
6588 }
6589 break;
6590
6591 case AARCH64_OPND_SIMD_IMM:
6592 case AARCH64_OPND_SIMD_IMM_SFT:
6593 if (! parse_big_immediate (&str, &val, imm_reg_type))
6594 goto failure;
6595 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6596 /* addr_off_p */ 0,
6597 /* need_libopcodes_p */ 1,
6598 /* skip_p */ 1);
6599 /* Parse shift.
6600 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6601 shift, we don't check it here; we leave the checking to
6602 the libopcodes (operand_general_constraint_met_p). By
6603 doing this, we achieve better diagnostics. */
6604 if (skip_past_comma (&str)
6605 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6606 goto failure;
6607 if (!info->shifter.operator_present
6608 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6609 {
6610 /* Default to LSL if not present. Libopcodes prefers shifter
6611 kind to be explicit. */
6612 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6613 info->shifter.kind = AARCH64_MOD_LSL;
6614 }
6615 break;
6616
6617 case AARCH64_OPND_FPIMM:
6618 case AARCH64_OPND_SIMD_FPIMM:
6619 case AARCH64_OPND_SVE_FPIMM8:
6620 {
6621 int qfloat;
6622 bool dp_p;
6623
6624 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6625 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6626 || !aarch64_imm_float_p (qfloat))
6627 {
6628 if (!error_p ())
6629 set_fatal_syntax_error (_("invalid floating-point"
6630 " constant"));
6631 goto failure;
6632 }
6633 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6634 inst.base.operands[i].imm.is_fp = 1;
6635 }
6636 break;
6637
6638 case AARCH64_OPND_SVE_I1_HALF_ONE:
6639 case AARCH64_OPND_SVE_I1_HALF_TWO:
6640 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6641 {
6642 int qfloat;
6643 bool dp_p;
6644
6645 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6646 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6647 {
6648 if (!error_p ())
6649 set_fatal_syntax_error (_("invalid floating-point"
6650 " constant"));
6651 goto failure;
6652 }
6653 inst.base.operands[i].imm.value = qfloat;
6654 inst.base.operands[i].imm.is_fp = 1;
6655 }
6656 break;
6657
6658 case AARCH64_OPND_LIMM:
6659 po_misc_or_fail (parse_shifter_operand (&str, info,
6660 SHIFTED_LOGIC_IMM));
6661 if (info->shifter.operator_present)
6662 {
6663 set_fatal_syntax_error
6664 (_("shift not allowed for bitmask immediate"));
6665 goto failure;
6666 }
6667 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6668 /* addr_off_p */ 0,
6669 /* need_libopcodes_p */ 1,
6670 /* skip_p */ 1);
6671 break;
6672
6673 case AARCH64_OPND_AIMM:
6674 if (opcode->op == OP_ADD)
6675 /* ADD may have relocation types. */
6676 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6677 SHIFTED_ARITH_IMM));
6678 else
6679 po_misc_or_fail (parse_shifter_operand (&str, info,
6680 SHIFTED_ARITH_IMM));
6681 switch (inst.reloc.type)
6682 {
6683 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6684 info->shifter.amount = 12;
6685 break;
6686 case BFD_RELOC_UNUSED:
6687 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6688 if (info->shifter.kind != AARCH64_MOD_NONE)
6689 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6690 inst.reloc.pc_rel = 0;
6691 break;
6692 default:
6693 break;
6694 }
6695 info->imm.value = 0;
6696 if (!info->shifter.operator_present)
6697 {
6698 /* Default to LSL if not present. Libopcodes prefers shifter
6699 kind to be explicit. */
6700 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6701 info->shifter.kind = AARCH64_MOD_LSL;
6702 }
6703 break;
6704
6705 case AARCH64_OPND_HALF:
6706 {
6707 /* #<imm16> or relocation. */
6708 int internal_fixup_p;
6709 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6710 if (internal_fixup_p)
6711 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6712 skip_whitespace (str);
6713 if (skip_past_comma (&str))
6714 {
6715 /* {, LSL #<shift>} */
6716 if (! aarch64_gas_internal_fixup_p ())
6717 {
6718 set_fatal_syntax_error (_("can't mix relocation modifier "
6719 "with explicit shift"));
6720 goto failure;
6721 }
6722 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6723 }
6724 else
6725 inst.base.operands[i].shifter.amount = 0;
6726 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6727 inst.base.operands[i].imm.value = 0;
6728 if (! process_movw_reloc_info ())
6729 goto failure;
6730 }
6731 break;
6732
6733 case AARCH64_OPND_EXCEPTION:
6734 case AARCH64_OPND_UNDEFINED:
6735 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6736 imm_reg_type));
6737 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6738 /* addr_off_p */ 0,
6739 /* need_libopcodes_p */ 0,
6740 /* skip_p */ 1);
6741 break;
6742
6743 case AARCH64_OPND_NZCV:
6744 {
6745 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6746 if (nzcv != NULL)
6747 {
6748 str += 4;
6749 info->imm.value = nzcv->value;
6750 break;
6751 }
6752 po_imm_or_fail (0, 15);
6753 info->imm.value = val;
6754 }
6755 break;
6756
6757 case AARCH64_OPND_COND:
6758 case AARCH64_OPND_COND1:
6759 {
6760 char *start = str;
6761 do
6762 str++;
6763 while (ISALPHA (*str));
6764 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6765 if (info->cond == NULL)
6766 {
6767 set_syntax_error (_("invalid condition"));
6768 goto failure;
6769 }
6770 else if (operands[i] == AARCH64_OPND_COND1
6771 && (info->cond->value & 0xe) == 0xe)
6772 {
6773 /* Do not allow AL or NV. */
6774 set_default_error ();
6775 goto failure;
6776 }
6777 }
6778 break;
6779
6780 case AARCH64_OPND_ADDR_ADRP:
6781 po_misc_or_fail (parse_adrp (&str));
6782 /* Clear the value as operand needs to be relocated. */
6783 info->imm.value = 0;
6784 break;
6785
6786 case AARCH64_OPND_ADDR_PCREL14:
6787 case AARCH64_OPND_ADDR_PCREL19:
6788 case AARCH64_OPND_ADDR_PCREL21:
6789 case AARCH64_OPND_ADDR_PCREL26:
6790 po_misc_or_fail (parse_address (&str, info));
6791 if (!info->addr.pcrel)
6792 {
6793 set_syntax_error (_("invalid pc-relative address"));
6794 goto failure;
6795 }
6796 if (inst.gen_lit_pool
6797 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6798 {
6799 /* Only permit "=value" in the literal load instructions.
6800 The literal will be generated by programmer_friendly_fixup. */
6801 set_syntax_error (_("invalid use of \"=immediate\""));
6802 goto failure;
6803 }
6804 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6805 {
6806 set_syntax_error (_("unrecognized relocation suffix"));
6807 goto failure;
6808 }
6809 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6810 {
6811 info->imm.value = inst.reloc.exp.X_add_number;
6812 inst.reloc.type = BFD_RELOC_UNUSED;
6813 }
6814 else
6815 {
6816 info->imm.value = 0;
6817 if (inst.reloc.type == BFD_RELOC_UNUSED)
6818 switch (opcode->iclass)
6819 {
6820 case compbranch:
6821 case condbranch:
6822 /* e.g. CBZ or B.COND */
6823 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6824 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6825 break;
6826 case testbranch:
6827 /* e.g. TBZ */
6828 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6829 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6830 break;
6831 case branch_imm:
6832 /* e.g. B or BL */
6833 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6834 inst.reloc.type =
6835 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6836 : BFD_RELOC_AARCH64_JUMP26;
6837 break;
6838 case loadlit:
6839 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6840 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6841 break;
6842 case pcreladdr:
6843 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6844 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6845 break;
6846 default:
6847 gas_assert (0);
6848 abort ();
6849 }
6850 inst.reloc.pc_rel = 1;
6851 }
6852 break;
6853
6854 case AARCH64_OPND_ADDR_SIMPLE:
6855 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6856 {
6857 /* [<Xn|SP>{, #<simm>}] */
6858 char *start = str;
6859 /* First use the normal address-parsing routines, to get
6860 the usual syntax errors. */
6861 po_misc_or_fail (parse_address (&str, info));
6862 if (info->addr.pcrel || info->addr.offset.is_reg
6863 || !info->addr.preind || info->addr.postind
6864 || info->addr.writeback)
6865 {
6866 set_syntax_error (_("invalid addressing mode"));
6867 goto failure;
6868 }
6869
6870 /* Then retry, matching the specific syntax of these addresses. */
6871 str = start;
6872 po_char_or_fail ('[');
6873 po_reg_or_fail (REG_TYPE_R64_SP);
6874 /* Accept optional ", #0". */
6875 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6876 && skip_past_char (&str, ','))
6877 {
6878 skip_past_char (&str, '#');
6879 if (! skip_past_char (&str, '0'))
6880 {
6881 set_fatal_syntax_error
6882 (_("the optional immediate offset can only be 0"));
6883 goto failure;
6884 }
6885 }
6886 po_char_or_fail (']');
6887 break;
6888 }
6889
6890 case AARCH64_OPND_ADDR_REGOFF:
6891 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6892 po_misc_or_fail (parse_address (&str, info));
6893 regoff_addr:
6894 if (info->addr.pcrel || !info->addr.offset.is_reg
6895 || !info->addr.preind || info->addr.postind
6896 || info->addr.writeback)
6897 {
6898 set_syntax_error (_("invalid addressing mode"));
6899 goto failure;
6900 }
6901 if (!info->shifter.operator_present)
6902 {
6903 /* Default to LSL if not present. Libopcodes prefers shifter
6904 kind to be explicit. */
6905 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6906 info->shifter.kind = AARCH64_MOD_LSL;
6907 }
6908 /* Qualifier to be deduced by libopcodes. */
6909 break;
6910
6911 case AARCH64_OPND_ADDR_SIMM7:
6912 po_misc_or_fail (parse_address (&str, info));
6913 if (info->addr.pcrel || info->addr.offset.is_reg
6914 || (!info->addr.preind && !info->addr.postind))
6915 {
6916 set_syntax_error (_("invalid addressing mode"));
6917 goto failure;
6918 }
6919 if (inst.reloc.type != BFD_RELOC_UNUSED)
6920 {
6921 set_syntax_error (_("relocation not allowed"));
6922 goto failure;
6923 }
6924 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6925 /* addr_off_p */ 1,
6926 /* need_libopcodes_p */ 1,
6927 /* skip_p */ 0);
6928 break;
6929
6930 case AARCH64_OPND_ADDR_SIMM9:
6931 case AARCH64_OPND_ADDR_SIMM9_2:
6932 case AARCH64_OPND_ADDR_SIMM11:
6933 case AARCH64_OPND_ADDR_SIMM13:
6934 po_misc_or_fail (parse_address (&str, info));
6935 if (info->addr.pcrel || info->addr.offset.is_reg
6936 || (!info->addr.preind && !info->addr.postind)
6937 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6938 && info->addr.writeback))
6939 {
6940 set_syntax_error (_("invalid addressing mode"));
6941 goto failure;
6942 }
6943 if (inst.reloc.type != BFD_RELOC_UNUSED)
6944 {
6945 set_syntax_error (_("relocation not allowed"));
6946 goto failure;
6947 }
6948 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6949 /* addr_off_p */ 1,
6950 /* need_libopcodes_p */ 1,
6951 /* skip_p */ 0);
6952 break;
6953
6954 case AARCH64_OPND_ADDR_SIMM10:
6955 case AARCH64_OPND_ADDR_OFFSET:
6956 po_misc_or_fail (parse_address (&str, info));
6957 if (info->addr.pcrel || info->addr.offset.is_reg
6958 || !info->addr.preind || info->addr.postind)
6959 {
6960 set_syntax_error (_("invalid addressing mode"));
6961 goto failure;
6962 }
6963 if (inst.reloc.type != BFD_RELOC_UNUSED)
6964 {
6965 set_syntax_error (_("relocation not allowed"));
6966 goto failure;
6967 }
6968 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6969 /* addr_off_p */ 1,
6970 /* need_libopcodes_p */ 1,
6971 /* skip_p */ 0);
6972 break;
6973
6974 case AARCH64_OPND_ADDR_UIMM12:
6975 po_misc_or_fail (parse_address (&str, info));
6976 if (info->addr.pcrel || info->addr.offset.is_reg
6977 || !info->addr.preind || info->addr.writeback)
6978 {
6979 set_syntax_error (_("invalid addressing mode"));
6980 goto failure;
6981 }
6982 if (inst.reloc.type == BFD_RELOC_UNUSED)
6983 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6984 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6985 || (inst.reloc.type
6986 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6987 || (inst.reloc.type
6988 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6989 || (inst.reloc.type
6990 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6991 || (inst.reloc.type
6992 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6993 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6994 /* Leave qualifier to be determined by libopcodes. */
6995 break;
6996
6997 case AARCH64_OPND_SIMD_ADDR_POST:
6998 /* [<Xn|SP>], <Xm|#<amount>> */
6999 po_misc_or_fail (parse_address (&str, info));
7000 if (!info->addr.postind || !info->addr.writeback)
7001 {
7002 set_syntax_error (_("invalid addressing mode"));
7003 goto failure;
7004 }
7005 if (!info->addr.offset.is_reg)
7006 {
7007 if (inst.reloc.exp.X_op == O_constant)
7008 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7009 else
7010 {
7011 set_fatal_syntax_error
7012 (_("writeback value must be an immediate constant"));
7013 goto failure;
7014 }
7015 }
7016 /* No qualifier. */
7017 break;
7018
7019 case AARCH64_OPND_SME_SM_ZA:
7020 /* { SM | ZA } */
7021 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7022 {
7023 set_syntax_error (_("unknown or missing PSTATE field name"));
7024 goto failure;
7025 }
7026 info->reg.regno = val;
7027 break;
7028
7029 case AARCH64_OPND_SME_PnT_Wm_imm:
7030 if (!parse_dual_indexed_reg (&str, REG_TYPE_PN,
7031 &info->indexed_za, &qualifier))
7032 goto failure;
7033 info->qualifier = qualifier;
7034 break;
7035
7036 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7037 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7038 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7039 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7040 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7041 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7042 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7043 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7044 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7045 case AARCH64_OPND_SVE_ADDR_RI_U6:
7046 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7047 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7048 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7049 /* [X<n>{, #imm, MUL VL}]
7050 [X<n>{, #imm}]
7051 but recognizing SVE registers. */
7052 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7053 &offset_qualifier));
7054 if (base_qualifier != AARCH64_OPND_QLF_X)
7055 {
7056 set_syntax_error (_("invalid addressing mode"));
7057 goto failure;
7058 }
7059 sve_regimm:
7060 if (info->addr.pcrel || info->addr.offset.is_reg
7061 || !info->addr.preind || info->addr.writeback)
7062 {
7063 set_syntax_error (_("invalid addressing mode"));
7064 goto failure;
7065 }
7066 if (inst.reloc.type != BFD_RELOC_UNUSED
7067 || inst.reloc.exp.X_op != O_constant)
7068 {
7069 /* Make sure this has priority over
7070 "invalid addressing mode". */
7071 set_fatal_syntax_error (_("constant offset required"));
7072 goto failure;
7073 }
7074 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7075 break;
7076
7077 case AARCH64_OPND_SVE_ADDR_R:
7078 /* [<Xn|SP>{, <R><m>}]
7079 but recognizing SVE registers. */
7080 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7081 &offset_qualifier));
7082 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7083 {
7084 offset_qualifier = AARCH64_OPND_QLF_X;
7085 info->addr.offset.is_reg = 1;
7086 info->addr.offset.regno = 31;
7087 }
7088 else if (base_qualifier != AARCH64_OPND_QLF_X
7089 || offset_qualifier != AARCH64_OPND_QLF_X)
7090 {
7091 set_syntax_error (_("invalid addressing mode"));
7092 goto failure;
7093 }
7094 goto regoff_addr;
7095
7096 case AARCH64_OPND_SVE_ADDR_RR:
7097 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7098 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7099 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7100 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7101 case AARCH64_OPND_SVE_ADDR_RX:
7102 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7103 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7104 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7105 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7106 but recognizing SVE registers. */
7107 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7108 &offset_qualifier));
7109 if (base_qualifier != AARCH64_OPND_QLF_X
7110 || offset_qualifier != AARCH64_OPND_QLF_X)
7111 {
7112 set_syntax_error (_("invalid addressing mode"));
7113 goto failure;
7114 }
7115 goto regoff_addr;
7116
7117 case AARCH64_OPND_SVE_ADDR_RZ:
7118 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7119 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7120 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7121 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7122 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7123 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7124 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7125 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7126 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7127 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7128 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7129 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7130 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7131 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7132 &offset_qualifier));
7133 if (base_qualifier != AARCH64_OPND_QLF_X
7134 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7135 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7136 {
7137 set_syntax_error (_("invalid addressing mode"));
7138 goto failure;
7139 }
7140 info->qualifier = offset_qualifier;
7141 goto regoff_addr;
7142
7143 case AARCH64_OPND_SVE_ADDR_ZX:
7144 /* [Zn.<T>{, <Xm>}]. */
7145 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7146 &offset_qualifier));
7147 /* Things to check:
7148 base_qualifier either S_S or S_D
7149 offset_qualifier must be X
7150 */
7151 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7152 && base_qualifier != AARCH64_OPND_QLF_S_D)
7153 || offset_qualifier != AARCH64_OPND_QLF_X)
7154 {
7155 set_syntax_error (_("invalid addressing mode"));
7156 goto failure;
7157 }
7158 info->qualifier = base_qualifier;
7159 if (!info->addr.offset.is_reg || info->addr.pcrel
7160 || !info->addr.preind || info->addr.writeback
7161 || info->shifter.operator_present != 0)
7162 {
7163 set_syntax_error (_("invalid addressing mode"));
7164 goto failure;
7165 }
7166 info->shifter.kind = AARCH64_MOD_LSL;
7167 break;
7168
7169
7170 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7171 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7172 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7173 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7174 /* [Z<n>.<T>{, #imm}] */
7175 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7176 &offset_qualifier));
7177 if (base_qualifier != AARCH64_OPND_QLF_S_S
7178 && base_qualifier != AARCH64_OPND_QLF_S_D)
7179 {
7180 set_syntax_error (_("invalid addressing mode"));
7181 goto failure;
7182 }
7183 info->qualifier = base_qualifier;
7184 goto sve_regimm;
7185
7186 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7187 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7188 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7189 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7190 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7191
7192 We don't reject:
7193
7194 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7195
7196 here since we get better error messages by leaving it to
7197 the qualifier checking routines. */
7198 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7199 &offset_qualifier));
7200 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7201 && base_qualifier != AARCH64_OPND_QLF_S_D)
7202 || offset_qualifier != base_qualifier)
7203 {
7204 set_syntax_error (_("invalid addressing mode"));
7205 goto failure;
7206 }
7207 info->qualifier = base_qualifier;
7208 goto regoff_addr;
7209
7210 case AARCH64_OPND_SYSREG:
7211 {
7212 uint32_t sysreg_flags;
7213 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7214 &sysreg_flags)) == PARSE_FAIL)
7215 {
7216 set_syntax_error (_("unknown or missing system register name"));
7217 goto failure;
7218 }
7219 inst.base.operands[i].sysreg.value = val;
7220 inst.base.operands[i].sysreg.flags = sysreg_flags;
7221 break;
7222 }
7223
7224 case AARCH64_OPND_PSTATEFIELD:
7225 {
7226 uint32_t sysreg_flags;
7227 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7228 &sysreg_flags)) == PARSE_FAIL)
7229 {
7230 set_syntax_error (_("unknown or missing PSTATE field name"));
7231 goto failure;
7232 }
7233 inst.base.operands[i].pstatefield = val;
7234 inst.base.operands[i].sysreg.flags = sysreg_flags;
7235 break;
7236 }
7237
7238 case AARCH64_OPND_SYSREG_IC:
7239 inst.base.operands[i].sysins_op =
7240 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7241 goto sys_reg_ins;
7242
7243 case AARCH64_OPND_SYSREG_DC:
7244 inst.base.operands[i].sysins_op =
7245 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7246 goto sys_reg_ins;
7247
7248 case AARCH64_OPND_SYSREG_AT:
7249 inst.base.operands[i].sysins_op =
7250 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7251 goto sys_reg_ins;
7252
7253 case AARCH64_OPND_SYSREG_SR:
7254 inst.base.operands[i].sysins_op =
7255 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7256 goto sys_reg_ins;
7257
7258 case AARCH64_OPND_SYSREG_TLBI:
7259 inst.base.operands[i].sysins_op =
7260 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7261 sys_reg_ins:
7262 if (inst.base.operands[i].sysins_op == NULL)
7263 {
7264 set_fatal_syntax_error ( _("unknown or missing operation name"));
7265 goto failure;
7266 }
7267 break;
7268
7269 case AARCH64_OPND_BARRIER:
7270 case AARCH64_OPND_BARRIER_ISB:
7271 val = parse_barrier (&str);
7272 if (val != PARSE_FAIL
7273 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7274 {
7275 /* ISB only accepts options name 'sy'. */
7276 set_syntax_error
7277 (_("the specified option is not accepted in ISB"));
7278 /* Turn off backtrack as this optional operand is present. */
7279 backtrack_pos = 0;
7280 goto failure;
7281 }
7282 if (val != PARSE_FAIL
7283 && operands[i] == AARCH64_OPND_BARRIER)
7284 {
7285 /* Regular barriers accept options CRm (C0-C15).
7286 DSB nXS barrier variant accepts values > 15. */
7287 if (val < 0 || val > 15)
7288 {
7289 set_syntax_error (_("the specified option is not accepted in DSB"));
7290 goto failure;
7291 }
7292 }
7293 /* This is an extension to accept a 0..15 immediate. */
7294 if (val == PARSE_FAIL)
7295 po_imm_or_fail (0, 15);
7296 info->barrier = aarch64_barrier_options + val;
7297 break;
7298
7299 case AARCH64_OPND_BARRIER_DSB_NXS:
7300 val = parse_barrier (&str);
7301 if (val != PARSE_FAIL)
7302 {
7303 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7304 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7305 {
7306 set_syntax_error (_("the specified option is not accepted in DSB"));
7307 /* Turn off backtrack as this optional operand is present. */
7308 backtrack_pos = 0;
7309 goto failure;
7310 }
7311 }
7312 else
7313 {
7314 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7315 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7316 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7317 goto failure;
7318 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7319 {
7320 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7321 goto failure;
7322 }
7323 }
7324 /* Option index is encoded as 2-bit value in val<3:2>. */
7325 val = (val >> 2) - 4;
7326 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7327 break;
7328
7329 case AARCH64_OPND_PRFOP:
7330 val = parse_pldop (&str);
7331 /* This is an extension to accept a 0..31 immediate. */
7332 if (val == PARSE_FAIL)
7333 po_imm_or_fail (0, 31);
7334 inst.base.operands[i].prfop = aarch64_prfops + val;
7335 break;
7336
7337 case AARCH64_OPND_BARRIER_PSB:
7338 val = parse_barrier_psb (&str, &(info->hint_option));
7339 if (val == PARSE_FAIL)
7340 goto failure;
7341 break;
7342
7343 case AARCH64_OPND_BTI_TARGET:
7344 val = parse_bti_operand (&str, &(info->hint_option));
7345 if (val == PARSE_FAIL)
7346 goto failure;
7347 break;
7348
7349 case AARCH64_OPND_SME_ZAda_2b:
7350 case AARCH64_OPND_SME_ZAda_3b:
7351 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier);
7352 if (!reg)
7353 goto failure;
7354 info->reg.regno = reg->number;
7355 info->qualifier = qualifier;
7356 break;
7357
7358 case AARCH64_OPND_SME_ZA_HV_idx_src:
7359 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7360 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7361 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7362 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7363 &info->indexed_za,
7364 &qualifier)
7365 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7366 &info->indexed_za, &qualifier))
7367 goto failure;
7368 info->qualifier = qualifier;
7369 break;
7370
7371 case AARCH64_OPND_SME_list_of_64bit_tiles:
7372 val = parse_sme_list_of_64bit_tiles (&str);
7373 if (val == PARSE_FAIL)
7374 goto failure;
7375 info->imm.value = val;
7376 break;
7377
7378 case AARCH64_OPND_SME_ZA_array:
7379 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7380 &info->indexed_za, &qualifier))
7381 goto failure;
7382 info->qualifier = qualifier;
7383 break;
7384
7385 case AARCH64_OPND_MOPS_ADDR_Rd:
7386 case AARCH64_OPND_MOPS_ADDR_Rs:
7387 po_char_or_fail ('[');
7388 if (!parse_x0_to_x30 (&str, info))
7389 goto failure;
7390 po_char_or_fail (']');
7391 po_char_or_fail ('!');
7392 break;
7393
7394 case AARCH64_OPND_MOPS_WB_Rn:
7395 if (!parse_x0_to_x30 (&str, info))
7396 goto failure;
7397 po_char_or_fail ('!');
7398 break;
7399
7400 default:
7401 as_fatal (_("unhandled operand code %d"), operands[i]);
7402 }
7403
7404 /* If we get here, this operand was successfully parsed. */
7405 inst.base.operands[i].present = 1;
7406 continue;
7407
7408 failure:
7409 /* The parse routine should already have set the error, but in case
7410 not, set a default one here. */
7411 if (! error_p ())
7412 set_default_error ();
7413
7414 if (! backtrack_pos)
7415 goto parse_operands_return;
7416
7417 {
7418 /* We reach here because this operand is marked as optional, and
7419 either no operand was supplied or the operand was supplied but it
7420 was syntactically incorrect. In the latter case we report an
7421 error. In the former case we perform a few more checks before
7422 dropping through to the code to insert the default operand. */
7423
7424 char *tmp = backtrack_pos;
7425 char endchar = END_OF_INSN;
7426
7427 if (i != (aarch64_num_of_operands (opcode) - 1))
7428 endchar = ',';
7429 skip_past_char (&tmp, ',');
7430
7431 if (*tmp != endchar)
7432 /* The user has supplied an operand in the wrong format. */
7433 goto parse_operands_return;
7434
7435 /* Make sure there is not a comma before the optional operand.
7436 For example the fifth operand of 'sys' is optional:
7437
7438 sys #0,c0,c0,#0, <--- wrong
7439 sys #0,c0,c0,#0 <--- correct. */
7440 if (comma_skipped_p && i && endchar == END_OF_INSN)
7441 {
7442 set_fatal_syntax_error
7443 (_("unexpected comma before the omitted optional operand"));
7444 goto parse_operands_return;
7445 }
7446 }
7447
7448 /* Reaching here means we are dealing with an optional operand that is
7449 omitted from the assembly line. */
7450 gas_assert (optional_operand_p (opcode, i));
7451 info->present = 0;
7452 process_omitted_operand (operands[i], opcode, i, info);
7453
7454 /* Try again, skipping the optional operand at backtrack_pos. */
7455 str = backtrack_pos;
7456 backtrack_pos = 0;
7457
7458 /* Clear any error record after the omitted optional operand has been
7459 successfully handled. */
7460 clear_error ();
7461 }
7462
7463 /* Check if we have parsed all the operands. */
7464 if (*str != '\0' && ! error_p ())
7465 {
7466 /* Set I to the index of the last present operand; this is
7467 for the purpose of diagnostics. */
7468 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7469 ;
7470 set_fatal_syntax_error
7471 (_("unexpected characters following instruction"));
7472 }
7473
7474 parse_operands_return:
7475
7476 if (error_p ())
7477 {
7478 inst.parsing_error.index = i;
7479 DEBUG_TRACE ("parsing FAIL: %s - %s",
7480 operand_mismatch_kind_names[inst.parsing_error.kind],
7481 inst.parsing_error.error);
7482 /* Record the operand error properly; this is useful when there
7483 are multiple instruction templates for a mnemonic name, so that
7484 later on, we can select the error that most closely describes
7485 the problem. */
7486 record_operand_error_info (opcode, &inst.parsing_error);
7487 return false;
7488 }
7489 else
7490 {
7491 DEBUG_TRACE ("parsing SUCCESS");
7492 return true;
7493 }
7494 }
7495
/* Perform fix-ups on *INSTR to provide programmer-friendly features
   while keeping libopcodes happy: libopcodes only accepts the
   preferred architectural syntax, so convenience forms accepted by
   GAS are converted to that syntax here.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A bit number of 32 or more cannot refer to a W register.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Accept the Wn convenience form by encoding it as the
	     architectural Xn form.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW reads a 4-byte literal regardless of the 64-bit
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constant-like expressions can be placed in the pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	/* Narrow the destination qualifier to the preferred W form.  */
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended-register operand is always the second or third
	   operand of this instruction class.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7603
/* Check for loads and stores that will cause unpredictable behavior.

   INSTR is the fully-parsed instruction; STR is the original assembly
   text, included verbatim in each warning message.  Only warnings are
   emitted -- such instructions still assemble.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair instructions, either transfer register may clash with
	 the base register (operand 2 holds the address).  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes loads (set) from stores.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set: a load-exclusive form.  */
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 set marks the pair variant.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7699
7700 static void
7701 force_automatic_sequence_close (void)
7702 {
7703 struct aarch64_segment_info_type *tc_seg_info;
7704
7705 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7706 if (tc_seg_info->insn_sequence.instr)
7707 {
7708 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7709 _("previous `%s' sequence has not been closed"),
7710 tc_seg_info->insn_sequence.instr->opcode->name);
7711 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7712 }
7713 }
7714
7715 /* A wrapper function to interface with libopcodes on encoding and
7716 record the error message if there is any.
7717
7718 Return TRUE on success; otherwise return FALSE. */
7719
7720 static bool
7721 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7722 aarch64_insn *code)
7723 {
7724 aarch64_operand_error error_info;
7725 memset (&error_info, '\0', sizeof (error_info));
7726 error_info.kind = AARCH64_OPDE_NIL;
7727 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7728 && !error_info.non_fatal)
7729 return true;
7730
7731 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7732 record_operand_error_info (opcode, &error_info);
7733 return error_info.non_fatal;
7734 }
7735
#ifdef DEBUG_AARCH64
/* Debug helper: trace the operand list of OPCODE, one line per operand,
   preferring the operand's name and falling back to its description.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7751
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  */
  char *p = str;
  /* First '.' in the mnemonic, if any (e.g. the b.cond form).  */
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  /* An empty mnemonic is an error.  */
  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A dot-less name may be a legacy-style register alias definition
     rather than an instruction; if so we are done.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve any condition parsed from the mnemonic across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse operands, apply programmer-friendly fixups, then encode;
	 only a fully successful entry is emitted.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This entry did not match; reset parsing state and try the next
	 opcode sharing the mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
7907
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every logical line: forget any label seen on a
   previous line so md_assemble does not re-anchor it.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7915
/* Remember SYM as the most recently defined label (so md_assemble can
   re-anchor it to the instruction address) and emit DWARF info for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7923
/* Per-section hook: close any automatic instruction sequence left open
   in SEC (the sequence state lives in per-segment data, not in SEC).  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7930
/* Recognize a "data:" marker following the current input character.
   If present, consume it: the character at input_line_pointer is
   rewritten to '/' and the pointer is left on a newly-written NUL just
   past "data:".  Return 1 if the marker was consumed, 0 otherwise.
   NOTE(review): the '/'-rewrite appears to build a "/data"-suffixed
   symbol name that aarch64_canonicalize_symbol_name later strips --
   confirm against the generic data-in-code handling in the caller.  */

int
aarch64_data_in_code (void)
{
  if (startswith (input_line_pointer + 1, "data:"))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}
7944
/* Canonicalize NAME in place: strip a trailing "/data" suffix (added
   for data-in-code marking) when NAME is longer than the suffix alone.
   Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + (len - 5), "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7955 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Define one register entry: spelling S, number N, type REG_TYPE_##T.
   The final 'true' marks a canonical (non-alias) name.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
/* As REGDEF, but flagged as an alias (e.g. ip0 for x16).  */
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
/* Define register P##N, e.g. REGNUM(x,0,R_64) yields entry "x0".  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, with suffix S appended to the name (ZA slice registers).  */
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
/* Registers 0..15 with prefix P.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0..15 with prefix P and suffix S.  */
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
/* Registers 0..30 (excludes number 31, which needs zr/sp spellings).  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Registers 0..31.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)
7983
7984 /* These go into aarch64_reg_hsh hash-table. */
7985 static const reg_entry reg_names[] = {
7986 /* Integer registers. */
7987 REGSET31 (x, R_64), REGSET31 (X, R_64),
7988 REGSET31 (w, R_32), REGSET31 (W, R_32),
7989
7990 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7991 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7992 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7993 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7994 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7995 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7996
7997 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7998 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7999
8000 /* Floating-point single precision registers. */
8001 REGSET (s, FP_S), REGSET (S, FP_S),
8002
8003 /* Floating-point double precision registers. */
8004 REGSET (d, FP_D), REGSET (D, FP_D),
8005
8006 /* Floating-point half precision registers. */
8007 REGSET (h, FP_H), REGSET (H, FP_H),
8008
8009 /* Floating-point byte precision registers. */
8010 REGSET (b, FP_B), REGSET (B, FP_B),
8011
8012 /* Floating-point quad precision registers. */
8013 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8014
8015 /* FP/SIMD registers. */
8016 REGSET (v, VN), REGSET (V, VN),
8017
8018 /* SVE vector registers. */
8019 REGSET (z, ZN), REGSET (Z, ZN),
8020
8021 /* SVE predicate registers. */
8022 REGSET16 (p, PN), REGSET16 (P, PN),
8023
8024 /* SME ZA. We model this as a register because it acts syntactically
8025 like ZA0H, supporting qualifier suffixes and indexing. */
8026 REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),
8027
8028 /* SME ZA tile registers. */
8029 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8030
8031 /* SME ZA tile registers (horizontal slice). */
8032 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8033
8034 /* SME ZA tile registers (vertical slice). */
8035 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8036 };
8037
8038 #undef REGDEF
8039 #undef REGDEF_ALIAS
8040 #undef REGNUM
8041 #undef REGSET16
8042 #undef REGSET31
8043 #undef REGSET
8044
/* Helpers for building the NZCV operand-name table: each condition-flag
   letter expands to 1 when uppercase (flag selected) and 0 when
   lowercase (flag clear).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into the 4-bit NZCV immediate, N high.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the NZCV operand, from "nzcv" (0) up to
   "NZCV" (15); the case of each letter encodes the corresponding bit.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8082 \f
8083 /* MD interface: bits in the object file. */
8084
8085 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8086 for use in the a.out file, and stores them in the array pointed to by buf.
8087 This knows about the endian-ness of the target machine and does
8088 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8089 2 (short) and 4 (long) Floating numbers are put out as a series of
8090 LITTLENUMS (shorts, here at least). */
8091
8092 void
8093 md_number_to_chars (char *buf, valueT val, int n)
8094 {
8095 if (target_big_endian)
8096 number_to_chars_bigendian (buf, val, n);
8097 else
8098 number_to_chars_littleendian (buf, val, n);
8099 }
8100
8101 /* MD interface: Sections. */
8102
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* One fixed-width 4-byte instruction word per frag.  */
  fragp->fr_var = 4;
  return 4;
}
8112
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-imposed rounding: return the size unchanged.  */
  return size;
}
8120
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must cover.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Zero-fill up to the next instruction-word boundary; where the
     object format has mapping symbols, mark the filler accordingly.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is one NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8178
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping data; nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
8191
#else /* OBJ_ELF is defined. */
/* ELF variant: record per-frag mapping-symbol state and set the mapping
   state ($x/$d) appropriate for this frag type.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8224
/* Whether SFrame stack trace info is supported.  */

bool
aarch64_support_sframe_p (void)
{
  /* At this time, SFrame is supported for aarch64 only.
     Only the LP64 ABI is covered; ILP32 (and LLP64) are excluded.  */
  return (aarch64_abi == AARCH64_ABI_LP64);
}
8233
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* Return-address tracking is always enabled on aarch64.  */
  return true;
}
8241
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* RA tracking is always on here (see aarch64_sframe_ra_tracking_p),
     so there is no fixed offset; return the invalid marker.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8250
8251 /* Get the abi/arch indentifier for SFrame. */
8252
8253 unsigned char
8254 aarch64_sframe_get_abi_arch (void)
8255 {
8256 unsigned char sframe_abi_arch = 0;
8257
8258 if (aarch64_support_sframe_p ())
8259 {
8260 sframe_abi_arch = target_big_endian
8261 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8262 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8263 }
8264
8265 return sframe_abi_arch;
8266 }
8267
8268 #endif /* OBJ_ELF */
8269 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8277
8278 /* Convert REGNAME to a DWARF-2 register number. */
8279
8280 int
8281 tc_aarch64_regname_to_dw2regnum (char *regname)
8282 {
8283 const reg_entry *reg = parse_reg (&regname);
8284 if (reg == NULL)
8285 return -1;
8286
8287 switch (reg->type)
8288 {
8289 case REG_TYPE_SP_32:
8290 case REG_TYPE_SP_64:
8291 case REG_TYPE_R_32:
8292 case REG_TYPE_R_64:
8293 return reg->number;
8294
8295 case REG_TYPE_FP_B:
8296 case REG_TYPE_FP_H:
8297 case REG_TYPE_FP_S:
8298 case REG_TYPE_FP_D:
8299 case REG_TYPE_FP_Q:
8300 return reg->number + 64;
8301
8302 default:
8303 break;
8304 }
8305 return -1;
8306 }
8307
8308 /* Implement DWARF2_ADDR_SIZE. */
8309
8310 int
8311 aarch64_dwarf2_addr_size (void)
8312 {
8313 if (ilp32_p)
8314 return 4;
8315 else if (llp64_p)
8316 return 8;
8317 return bfd_arch_bits_per_address (stdoutput) / 8;
8318 }
8319
8320 /* MD interface: Symbol and relocation handling. */
8321
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.

   FIXP is the fixup being resolved and SEG the segment containing it.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8342
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character check before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8368
8369 /* Return non-zero if the indicated VALUE has overflowed the maximum
8370 range expressible by a unsigned number with the indicated number of
8371 BITS. */
8372
8373 static bool
8374 unsigned_overflow (valueT value, unsigned bits)
8375 {
8376 valueT lim;
8377 if (bits >= sizeof (valueT) * 8)
8378 return false;
8379 lim = (valueT) 1 << bits;
8380 return (value >= lim);
8381 }
8382
8383
8384 /* Return non-zero if the indicated VALUE has overflowed the maximum
8385 range expressible by an signed number with the indicated number of
8386 BITS. */
8387
8388 static bool
8389 signed_overflow (offsetT value, unsigned bits)
8390 {
8391 offsetT lim;
8392 if (bits >= sizeof (offsetT) * 8)
8393 return false;
8394 lim = (offsetT) 1 << (bits - 1);
8395 return (value < -lim || value >= lim);
8396 }
8397
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart;
     OP_NIL means there is no unscaled equivalent.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  /* The address is always operand 1 for these load/store forms.  */
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset
     does not fit the 9-bit signed form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8460
8461 /* Called by fix_insn to fix a MOV immediate alias instruction.
8462
8463 Operand for a generic move immediate instruction, which is an alias
8464 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8465 a 32-bit/64-bit immediate value into general register. An assembler error
8466 shall result if the immediate cannot be created by a single one of these
8467 instructions. If there is a choice, then to ensure reversability an
8468 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8469
8470 static void
8471 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8472 {
8473 const aarch64_opcode *opcode;
8474
8475 /* Need to check if the destination is SP/ZR. The check has to be done
8476 before any aarch64_replace_opcode. */
8477 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8478 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8479
8480 instr->operands[1].imm.value = value;
8481 instr->operands[1].skip = 0;
8482
8483 if (try_mov_wide_p)
8484 {
8485 /* Try the MOVZ alias. */
8486 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8487 aarch64_replace_opcode (instr, opcode);
8488 if (aarch64_opcode_encode (instr->opcode, instr,
8489 &instr->value, NULL, NULL, insn_sequence))
8490 {
8491 put_aarch64_insn (buf, instr->value);
8492 return;
8493 }
8494 /* Try the MOVK alias. */
8495 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8496 aarch64_replace_opcode (instr, opcode);
8497 if (aarch64_opcode_encode (instr->opcode, instr,
8498 &instr->value, NULL, NULL, insn_sequence))
8499 {
8500 put_aarch64_insn (buf, instr->value);
8501 return;
8502 }
8503 }
8504
8505 if (try_mov_bitmask_p)
8506 {
8507 /* Try the ORR alias. */
8508 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8509 aarch64_replace_opcode (instr, opcode);
8510 if (aarch64_opcode_encode (instr->opcode, instr,
8511 &instr->value, NULL, NULL, insn_sequence))
8512 {
8513 put_aarch64_insn (buf, instr->value);
8514 return;
8515 }
8516 }
8517
8518 as_bad_where (fixP->fx_file, fixP->fx_line,
8519 _("immediate cannot be moved by a single instruction"));
8520 }
8521
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Location of the instruction bytes being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; exception instructions use their own field
	 encoder while UDF takes the value directly.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		   3  322|2222|2  2  2 21111 111111
		   1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0  Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0  Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0  Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0  Rm    imm6   Rn    Rd    SUBS
	 ->
		   3  322|2222|2 2   221111111111
		   1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only exact
	     multiples of 4096 that fit in 24 bits qualify.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Let libopcodes validate and re-encode.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* A scaled LDR/STR whose resolved offset will not encode may
	     still fit the unscaled LDUR/STUR form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8700
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* For BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP, fx_addnumber carries the
     flags recorded when the fixup was created; they are forwarded
     unchanged to fix_insn below.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* LDR (literal): 19-bit signed word offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* TBZ/TBNZ: 14-bit signed word offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW groups share the tail at movw_common; SCALE selects
       which 16-bit chunk of the value this instruction holds.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Resolve the ABI-neutral reloc to the concrete ILP32/LP64 one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst, if any.
     N.B. currently there are a very limited number of fix-up types that
     actually use this field, so the impact on performance should be
     minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9093
9094 /* Translate internal representation of relocation info to BFD target
9095 format. */
9096
9097 arelent *
9098 tc_gen_reloc (asection * section, fixS * fixp)
9099 {
9100 arelent *reloc;
9101 bfd_reloc_code_real_type code;
9102
9103 reloc = XNEW (arelent);
9104
9105 reloc->sym_ptr_ptr = XNEW (asymbol *);
9106 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9107 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9108
9109 if (fixp->fx_pcrel)
9110 {
9111 if (section->use_rela_p)
9112 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9113 else
9114 fixp->fx_offset = reloc->address;
9115 }
9116 reloc->addend = fixp->fx_offset;
9117
9118 code = fixp->fx_r_type;
9119 switch (code)
9120 {
9121 case BFD_RELOC_16:
9122 if (fixp->fx_pcrel)
9123 code = BFD_RELOC_16_PCREL;
9124 break;
9125
9126 case BFD_RELOC_32:
9127 if (fixp->fx_pcrel)
9128 code = BFD_RELOC_32_PCREL;
9129 break;
9130
9131 case BFD_RELOC_64:
9132 if (fixp->fx_pcrel)
9133 code = BFD_RELOC_64_PCREL;
9134 break;
9135
9136 default:
9137 break;
9138 }
9139
9140 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9141 if (reloc->howto == NULL)
9142 {
9143 as_bad_where (fixp->fx_file, fixp->fx_line,
9144 _
9145 ("cannot represent %s relocation in this object file format"),
9146 bfd_get_reloc_code_name (code));
9147 return NULL;
9148 }
9149
9150 return reloc;
9151 }
9152
9153 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9154
9155 void
9156 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9157 {
9158 bfd_reloc_code_real_type type;
9159 int pcrel = 0;
9160
9161 #ifdef TE_PE
9162 if (exp->X_op == O_secrel)
9163 {
9164 exp->X_op = O_symbol;
9165 type = BFD_RELOC_32_SECREL;
9166 }
9167 else if (exp->X_op == O_secidx)
9168 {
9169 exp->X_op = O_symbol;
9170 type = BFD_RELOC_16_SECIDX;
9171 }
9172 else
9173 {
9174 #endif
9175 /* Pick a reloc.
9176 FIXME: @@ Should look at CPU word size. */
9177 switch (size)
9178 {
9179 case 1:
9180 type = BFD_RELOC_8;
9181 break;
9182 case 2:
9183 type = BFD_RELOC_16;
9184 break;
9185 case 4:
9186 type = BFD_RELOC_32;
9187 break;
9188 case 8:
9189 type = BFD_RELOC_64;
9190 break;
9191 default:
9192 as_bad (_("cannot do %u-byte relocation"), size);
9193 type = BFD_RELOC_UNUSED;
9194 break;
9195 }
9196 #ifdef TE_PE
9197 }
9198 #endif
9199
9200 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9201 }
9202
9203 /* Implement md_after_parse_args. This is the earliest time we need to decide
9204 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9205
9206 void
9207 aarch64_after_parse_args (void)
9208 {
9209 if (aarch64_abi != AARCH64_ABI_NONE)
9210 return;
9211
9212 #ifdef OBJ_ELF
9213 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9214 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9215 aarch64_abi = AARCH64_ABI_ILP32;
9216 else
9217 aarch64_abi = AARCH64_ABI_LP64;
9218 #else
9219 aarch64_abi = AARCH64_ABI_LLP64;
9220 #endif
9221 }
9222
9223 #ifdef OBJ_ELF
9224 const char *
9225 elf64_aarch64_target_format (void)
9226 {
9227 #ifdef TE_CLOUDABI
9228 /* FIXME: What to do for ilp32_p ? */
9229 if (target_big_endian)
9230 return "elf64-bigaarch64-cloudabi";
9231 else
9232 return "elf64-littleaarch64-cloudabi";
9233 #else
9234 if (target_big_endian)
9235 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9236 else
9237 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9238 #endif
9239 }
9240
/* Hook called while writing out each symbol; simply defer to the
   generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9246 #elif defined OBJ_COFF
/* Return the BFD target name used for COFF/PE output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9252 #endif
9253
9254 /* MD interface: Finalization. */
9255
9256 /* A good place to do this, although this was probably not intended
9257 for this kind of use. We need to dump the literal pool before
9258 references are made to a null symbol pointer. */
9259
9260 void
9261 aarch64_cleanup (void)
9262 {
9263 literal_pool *pool;
9264
9265 for (pool = list_of_pools; pool; pool = pool->next)
9266 {
9267 /* Put it at the end of the relevant section. */
9268 subseg_set (pool->section, pool->sub_section);
9269 s_ltorg (0);
9270 }
9271 }
9272
9273 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of each frag is a candidate for
	 removal; earlier ones are followed by code/data in the same
	 frag and must stay.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9337 #endif
9338
/* Adjust the symbol table.  Called just before the symbol table is
   written out; for ELF this prunes redundant mapping symbols and then
   runs the generic ELF adjustments.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9351
/* Insert VALUE into TABLE under KEY.  Passing replace == 0 means an
   already-present entry is left untouched rather than overwritten.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9357
/* Insert a system-register entry into TABLE under KEY, asserting that
   the name fits in AARCH64_MAX_SYSREG_NAME_LEN (fixed-size buffers
   presumably depend on this bound — the assert enforces it).  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9364
9365 static void
9366 fill_instruction_hash_table (void)
9367 {
9368 const aarch64_opcode *opcode = aarch64_opcode_table;
9369
9370 while (opcode->name != NULL)
9371 {
9372 templates *templ, *new_templ;
9373 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9374
9375 new_templ = XNEW (templates);
9376 new_templ->opcode = opcode;
9377 new_templ->next = NULL;
9378
9379 if (!templ)
9380 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9381 else
9382 {
9383 new_templ->next = templ->next;
9384 templ->next = new_templ;
9385 }
9386 ++opcode;
9387 }
9388 }
9389
/* Copy at most NUM characters of the NUL-terminated string SRC into
   DST, upper-casing each one, and NUL-terminate DST.  DST must have
   room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  /* Use size_t for the index so it matches the type of NUM; the old
     'unsigned int' counter could never reach a bound above UINT_MAX
     on LP64 hosts.  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9398
9399 /* Assume STR point to a lower-case string, allocate, convert and return
9400 the corresponding upper-case string. */
9401 static inline const char*
9402 get_upper_str (const char *str)
9403 {
9404 char *ret;
9405 size_t len = strlen (str);
9406 ret = XNEWVEC (char, len + 1);
9407 convert_to_upper (ret, str, len);
9408 return ret;
9409 }
9410
9411 /* MD interface: Initialization. */
9412
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the keyword hash tables consulted by the parser.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* Hash the system-register and operation tables; these are
     NULL-name terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and condition flags; these are fixed-size arrays.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu wins over -march; fall back to the build-time default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9582
/* Command line processing.  */

const char *md_shortopts = "m:";

/* Endianness long options.  Bi-endian builds define both; otherwise
   only the option matching the configured byte order exists, and the
   md_longopts entries below are compiled in conditionally.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9609
/* Simple boolean target options: when OPTION matches, *VAR is set to
   VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9632
/* One -mcpu= candidate: its option name and the feature set it
   enables.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9641
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Entries pair a base architecture with the
   optional extensions that the core implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
		  "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
		   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
	   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
		"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
	     "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
		  "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
		  "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
		  "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
	      "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
	      "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
	       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
	     "Broadcom Vulcan"},
  /* 'xgene-1' is an older spelling kept for backward compatibility;
     'xgene1' below is the preferred name.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
		"Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
		"Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}	/* Sentinel: loops stop at NULL name.  */
};
9801
/* An entry in the -march/.arch table: maps an architecture name to the
   feature set it enables.  */
struct aarch64_arch_option_table
{
  const char *name;			/* Name accepted after -march= / .arch.  */
  const aarch64_feature_set value;	/* Features implied by this architecture.  */
};
9807
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}	/* Sentinel: loops stop at NULL name.  */
};
9828
/* ISA extensions.  An entry names one "+ext" architectural extension
   together with the extensions it depends on.  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Extension name, e.g. "sve2".  */
  const aarch64_feature_set value;	/* Feature bits this extension enables.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9836
/* Table of architectural extensions accepted after '+' in -mcpu/-march
   and by .arch_extension.  The REQUIRE column drives the transitive
   enable/disable closures computed below.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  /* "sme-f64" is an alias kept alongside the preferred "sme-f64f64".  */
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  /* "sme-i64" is an alias kept alongside the preferred "sme-i16i64".  */
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
   AARCH64_ARCH_NONE},
  {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
   AARCH64_ARCH_NONE},
  {"cssc", AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},	/* Sentinel.  */
};
9937
/* A long command-line option ("-mabi=", "-mcpu=", "-march=") together
   with the parser for its argument.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9945
9946 /* Transitive closure of features depending on set. */
9947 static aarch64_feature_set
9948 aarch64_feature_disable_set (aarch64_feature_set set)
9949 {
9950 const struct aarch64_option_cpu_value_table *opt;
9951 aarch64_feature_set prev = 0;
9952
9953 while (prev != set) {
9954 prev = set;
9955 for (opt = aarch64_features; opt->name != NULL; opt++)
9956 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9957 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9958 }
9959 return set;
9960 }
9961
9962 /* Transitive closure of dependencies of set. */
9963 static aarch64_feature_set
9964 aarch64_feature_enable_set (aarch64_feature_set set)
9965 {
9966 const struct aarch64_option_cpu_value_table *opt;
9967 aarch64_feature_set prev = 0;
9968
9969 while (prev != set) {
9970 prev = set;
9971 for (opt = aarch64_features; opt->name != NULL; opt++)
9972 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9973 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9974 }
9975 return set;
9976 }
9977
9978 static int
9979 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9980 bool ext_only)
9981 {
9982 /* We insist on extensions being added before being removed. We achieve
9983 this by using the ADDING_VALUE variable to indicate whether we are
9984 adding an extension (1) or removing it (0) and only allowing it to
9985 change in the order -1 -> 1 -> 0. */
9986 int adding_value = -1;
9987 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9988
9989 /* Copy the feature set, so that we can modify it. */
9990 *ext_set = **opt_p;
9991 *opt_p = ext_set;
9992
9993 while (str != NULL && *str != 0)
9994 {
9995 const struct aarch64_option_cpu_value_table *opt;
9996 const char *ext = NULL;
9997 int optlen;
9998
9999 if (!ext_only)
10000 {
10001 if (*str != '+')
10002 {
10003 as_bad (_("invalid architectural extension"));
10004 return 0;
10005 }
10006
10007 ext = strchr (++str, '+');
10008 }
10009
10010 if (ext != NULL)
10011 optlen = ext - str;
10012 else
10013 optlen = strlen (str);
10014
10015 if (optlen >= 2 && startswith (str, "no"))
10016 {
10017 if (adding_value != 0)
10018 adding_value = 0;
10019 optlen -= 2;
10020 str += 2;
10021 }
10022 else if (optlen > 0)
10023 {
10024 if (adding_value == -1)
10025 adding_value = 1;
10026 else if (adding_value != 1)
10027 {
10028 as_bad (_("must specify extensions to add before specifying "
10029 "those to remove"));
10030 return false;
10031 }
10032 }
10033
10034 if (optlen == 0)
10035 {
10036 as_bad (_("missing architectural extension"));
10037 return 0;
10038 }
10039
10040 gas_assert (adding_value != -1);
10041
10042 for (opt = aarch64_features; opt->name != NULL; opt++)
10043 if (strncmp (opt->name, str, optlen) == 0)
10044 {
10045 aarch64_feature_set set;
10046
10047 /* Add or remove the extension. */
10048 if (adding_value)
10049 {
10050 set = aarch64_feature_enable_set (opt->value);
10051 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10052 }
10053 else
10054 {
10055 set = aarch64_feature_disable_set (opt->value);
10056 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10057 }
10058 break;
10059 }
10060
10061 if (opt->name == NULL)
10062 {
10063 as_bad (_("unknown architectural extension `%s'"), str);
10064 return 0;
10065 }
10066
10067 str = ext;
10068 };
10069
10070 return 1;
10071 }
10072
10073 static int
10074 aarch64_parse_cpu (const char *str)
10075 {
10076 const struct aarch64_cpu_option_table *opt;
10077 const char *ext = strchr (str, '+');
10078 size_t optlen;
10079
10080 if (ext != NULL)
10081 optlen = ext - str;
10082 else
10083 optlen = strlen (str);
10084
10085 if (optlen == 0)
10086 {
10087 as_bad (_("missing cpu name `%s'"), str);
10088 return 0;
10089 }
10090
10091 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10092 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10093 {
10094 mcpu_cpu_opt = &opt->value;
10095 if (ext != NULL)
10096 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10097
10098 return 1;
10099 }
10100
10101 as_bad (_("unknown cpu `%s'"), str);
10102 return 0;
10103 }
10104
10105 static int
10106 aarch64_parse_arch (const char *str)
10107 {
10108 const struct aarch64_arch_option_table *opt;
10109 const char *ext = strchr (str, '+');
10110 size_t optlen;
10111
10112 if (ext != NULL)
10113 optlen = ext - str;
10114 else
10115 optlen = strlen (str);
10116
10117 if (optlen == 0)
10118 {
10119 as_bad (_("missing architecture name `%s'"), str);
10120 return 0;
10121 }
10122
10123 for (opt = aarch64_archs; opt->name != NULL; opt++)
10124 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10125 {
10126 march_cpu_opt = &opt->value;
10127 if (ext != NULL)
10128 return aarch64_parse_features (ext, &march_cpu_opt, false);
10129
10130 return 1;
10131 }
10132
10133 as_bad (_("unknown architecture `%s'\n"), str);
10134 return 0;
10135 }
10136
/* ABIs.  Maps a -mabi= argument to the corresponding internal ABI
   enumeration value.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* Name accepted after -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding ABI selector.  */
};
10143
/* ABIs selectable via -mabi=.  ELF targets offer ilp32/lp64; other
   (PE/COFF) targets offer llp64 only.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10152
10153 static int
10154 aarch64_parse_abi (const char *str)
10155 {
10156 unsigned int i;
10157
10158 if (str[0] == '\0')
10159 {
10160 as_bad (_("missing abi name `%s'"), str);
10161 return 0;
10162 }
10163
10164 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10165 if (strcmp (str, aarch64_abis[i].name) == 0)
10166 {
10167 aarch64_abi = aarch64_abis[i].value;
10168 return 1;
10169 }
10170
10171 as_bad (_("unknown abi `%s'\n"), str);
10172 return 0;
10173 }
10174
/* Long options with arguments, dispatched to the parsers above by
   md_parse_option.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}		/* Sentinel.  */
};
10184
/* gas hook: handle target-specific command-line option C with argument
   ARG.  Returns 1 if the option was recognized, 0 otherwise.  Scans
   the short-option table (aarch64_opts) first, then the long-option
   table (aarch64_long_opts).  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text following the
		 option name (e.g. the "foo" of "-mcpu=foo").  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10251
/* gas hook: print the target-specific portion of --help to FP.  Lists
   every entry of aarch64_opts and aarch64_long_opts that has help text,
   plus the -EB/-EL endianness options when the target supports them.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
10278
10279 /* Parse a .cpu directive. */
10280
10281 static void
10282 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10283 {
10284 const struct aarch64_cpu_option_table *opt;
10285 char saved_char;
10286 char *name;
10287 char *ext;
10288 size_t optlen;
10289
10290 name = input_line_pointer;
10291 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10292 saved_char = *input_line_pointer;
10293 *input_line_pointer = 0;
10294
10295 ext = strchr (name, '+');
10296
10297 if (ext != NULL)
10298 optlen = ext - name;
10299 else
10300 optlen = strlen (name);
10301
10302 /* Skip the first "all" entry. */
10303 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10304 if (strlen (opt->name) == optlen
10305 && strncmp (name, opt->name, optlen) == 0)
10306 {
10307 mcpu_cpu_opt = &opt->value;
10308 if (ext != NULL)
10309 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10310 return;
10311
10312 cpu_variant = *mcpu_cpu_opt;
10313
10314 *input_line_pointer = saved_char;
10315 demand_empty_rest_of_line ();
10316 return;
10317 }
10318 as_bad (_("unknown cpu `%s'"), name);
10319 *input_line_pointer = saved_char;
10320 ignore_rest_of_line ();
10321 }
10322
10323
10324 /* Parse a .arch directive. */
10325
10326 static void
10327 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10328 {
10329 const struct aarch64_arch_option_table *opt;
10330 char saved_char;
10331 char *name;
10332 char *ext;
10333 size_t optlen;
10334
10335 name = input_line_pointer;
10336 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10337 saved_char = *input_line_pointer;
10338 *input_line_pointer = 0;
10339
10340 ext = strchr (name, '+');
10341
10342 if (ext != NULL)
10343 optlen = ext - name;
10344 else
10345 optlen = strlen (name);
10346
10347 /* Skip the first "all" entry. */
10348 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10349 if (strlen (opt->name) == optlen
10350 && strncmp (name, opt->name, optlen) == 0)
10351 {
10352 mcpu_cpu_opt = &opt->value;
10353 if (ext != NULL)
10354 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10355 return;
10356
10357 cpu_variant = *mcpu_cpu_opt;
10358
10359 *input_line_pointer = saved_char;
10360 demand_empty_rest_of_line ();
10361 return;
10362 }
10363
10364 as_bad (_("unknown architecture `%s'\n"), name);
10365 *input_line_pointer = saved_char;
10366 ignore_rest_of_line ();
10367 }
10368
10369 /* Parse a .arch_extension directive. */
10370
10371 static void
10372 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10373 {
10374 char saved_char;
10375 char *ext = input_line_pointer;
10376
10377 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10378 saved_char = *input_line_pointer;
10379 *input_line_pointer = 0;
10380
10381 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10382 return;
10383
10384 cpu_variant = *mcpu_cpu_opt;
10385
10386 *input_line_pointer = saved_char;
10387 demand_empty_rest_of_line ();
10388 }
10389
/* Copy symbol information.  Propagates the AArch64-specific symbol
   flags from SRC to DEST (used when one symbol is aliased to another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10397
10398 #ifdef OBJ_ELF
10399 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10400 This is needed so AArch64 specific st_other values can be independently
10401 specified for an IFUNC resolver (that is called by the dynamic linker)
10402 and the symbol it resolves (aliased to the resolver). In particular,
10403 if a function symbol has special st_other value set via directives,
10404 then attaching an IFUNC resolver to that symbol should not override
10405 the st_other setting. Requiring the directive on the IFUNC resolver
10406 symbol would be unexpected and problematic in C code, where the two
10407 symbols appear as two independent function declarations. */
10408
void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *srcelf = symbol_get_obj (src);
  struct elf_obj_sy *destelf = symbol_get_obj (dest);
  /* If size is unset, copy size from src.  Because we don't track whether
     .size has been used, we can't differentiate .size dest, 0 from the case
     where dest's size is unset.  */
  if (!destelf->size && S_GET_SIZE (dest) == 0)
    {
      if (srcelf->size)
	{
	  /* Deep-copy the size expression so DEST owns its own copy.  */
	  destelf->size = XNEW (expressionS);
	  *destelf->size = *srcelf->size;
	}
      S_SET_SIZE (dest, S_GET_SIZE (src));
    }
}
10427 #endif