aarch64: [SME] Add LD1x, ST1x, LDR and STR instructions
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
/* SME horizontal or vertical slice indicator, encoded in "V".
   Values:
     0 - Horizontal
     1 - Vertical  */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE 1
#define NTA_HASINDEX 2
#define NTA_HASVARWIDTH 4

/* Describes the shape suffix and/or element index parsed after a
   vector register name (e.g. the ".4s" or "[1]" in "v0.4s[1]").  */
struct vector_type_el
{
  /* Element type; NT_invtype when no type was parsed.  */
  enum vector_el_type type;
  /* Mask of NTA_* bits saying which pieces of information are present.  */
  unsigned char defined;
  /* Number of elements; 0 when the width is variable (SVE) or absent.  */
  unsigned width;
  /* Element index; meaningful when NTA_HASINDEX is set.  */
  int64_t index;
};
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
128 struct reloc
129 {
130 bfd_reloc_code_real_type type;
131 expressionS exp;
132 int pc_rel;
133 enum aarch64_opnd opnd;
134 uint32_t flags;
135 unsigned need_libopcodes_p : 1;
136 };
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 #ifdef OBJ_ELF
164 # define now_instr_sequence seg_info \
165 (now_seg)->tc_segment_info_data.insn_sequence
166 #else
167 static struct aarch64_instr_sequence now_instr_sequence;
168 #endif
169
170 /* Diagnostics inline function utilities.
171
172 These are lightweight utilities which should only be called by parse_operands
173 and other parsers. GAS processes each assembly line by parsing it against
174 instruction template(s), in the case of multiple templates (for the same
175 mnemonic name), those templates are tried one by one until one succeeds or
176 all fail. An assembly line may fail a few templates before being
177 successfully parsed; an error saved here in most cases is not a user error
178 but an error indicating the current template is not the right template.
179 Therefore it is very important that errors can be saved at a low cost during
180 the parsing; we don't want to slow down the whole parsing by recording
181 non-user errors in detail.
182
183 Remember that the objective is to help GAS pick up the most appropriate
184 error message in the case of multiple templates, e.g. FMOV which has 8
185 templates. */
186
187 static inline void
188 clear_error (void)
189 {
190 inst.parsing_error.kind = AARCH64_OPDE_NIL;
191 inst.parsing_error.error = NULL;
192 }
193
194 static inline bool
195 error_p (void)
196 {
197 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
198 }
199
200 static inline const char *
201 get_error_message (void)
202 {
203 return inst.parsing_error.error;
204 }
205
206 static inline enum aarch64_operand_error_kind
207 get_error_kind (void)
208 {
209 return inst.parsing_error.kind;
210 }
211
212 static inline void
213 set_error (enum aarch64_operand_error_kind kind, const char *error)
214 {
215 inst.parsing_error.kind = kind;
216 inst.parsing_error.error = error;
217 }
218
219 static inline void
220 set_recoverable_error (const char *error)
221 {
222 set_error (AARCH64_OPDE_RECOVERABLE, error);
223 }
224
225 /* Use the DESC field of the corresponding aarch64_operand entry to compose
226 the error message. */
227 static inline void
228 set_default_error (void)
229 {
230 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
231 }
232
233 static inline void
234 set_syntax_error (const char *error)
235 {
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
239 static inline void
240 set_first_syntax_error (const char *error)
241 {
242 if (! error_p ())
243 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
244 }
245
246 static inline void
247 set_fatal_syntax_error (const char *error)
248 {
249 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
250 }
251 \f
252 /* Return value for certain parsers when the parsing fails; those parsers
253 return the information of the parsed result, e.g. register number, on
254 success. */
255 #define PARSE_FAIL -1
256
257 /* This is an invalid condition code that means no conditional field is
258 present. */
259 #define COND_ALWAYS 0x10
260
261 typedef struct
262 {
263 const char *template;
264 uint32_t value;
265 } asm_nzcv;
266
267 struct reloc_entry
268 {
269 char *name;
270 bfd_reloc_code_real_type reloc;
271 };
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(VN) /* v[0-31] */ \
290 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
291 BASIC_REG_TYPE(PN) /* p[0-15] */ \
292 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
293 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
294 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
295 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
296 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
297 /* Typecheck: same, plus SVE registers. */ \
298 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
301 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
303 /* Typecheck: same, plus SVE registers. */ \
304 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
306 | REG_TYPE(ZN)) \
307 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
308 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
309 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
310 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
314 /* Typecheck: any [BHSDQ]P FP. */ \
315 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
318 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
322 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
323 be used for SVE instructions, since Zn and Pn are valid symbols \
324 in other contexts. */ \
325 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
328 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
329 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
330 | REG_TYPE(ZN) | REG_TYPE(PN)) \
331 /* Any integer register; used for error messages only. */ \
332 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
333 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
334 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
335 /* Pseudo type to mark the end of the enumerator sequence. */ \
336 BASIC_REG_TYPE(MAX)
337
338 #undef BASIC_REG_TYPE
339 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
342
343 /* Register type enumerators. */
344 typedef enum aarch64_reg_type_
345 {
346 /* A list of REG_TYPE_*. */
347 AARCH64_REG_TYPES
348 } aarch64_reg_type;
349
350 #undef BASIC_REG_TYPE
351 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
352 #undef REG_TYPE
353 #define REG_TYPE(T) (1 << REG_TYPE_##T)
354 #undef MULTI_REG_TYPE
355 #define MULTI_REG_TYPE(T,V) V,
356
357 /* Structure for a hash table entry for a register. */
358 typedef struct
359 {
360 const char *name;
361 unsigned char number;
362 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
363 unsigned char builtin;
364 } reg_entry;
365
366 /* Values indexed by aarch64_reg_type to assist the type checking. */
367 static const unsigned reg_type_masks[] =
368 {
369 AARCH64_REG_TYPES
370 };
371
372 #undef BASIC_REG_TYPE
373 #undef REG_TYPE
374 #undef MULTI_REG_TYPE
375 #undef AARCH64_REG_TYPES
376
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  The returned string is marked with N_() for translation; the
   caller is expected to pass it through _() before display.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* Remaining REG_TYPE_* values have no dedicated diagnostic;
	 reaching here indicates an internal inconsistency.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
454
455 /* Some well known registers that we refer to directly elsewhere. */
456 #define REG_SP 31
457 #define REG_ZR 31
458
459 /* Instructions take 4 bytes in the object file. */
460 #define INSN_SIZE 4
461
462 static htab_t aarch64_ops_hsh;
463 static htab_t aarch64_cond_hsh;
464 static htab_t aarch64_shift_hsh;
465 static htab_t aarch64_sys_regs_hsh;
466 static htab_t aarch64_pstatefield_hsh;
467 static htab_t aarch64_sys_regs_ic_hsh;
468 static htab_t aarch64_sys_regs_dc_hsh;
469 static htab_t aarch64_sys_regs_at_hsh;
470 static htab_t aarch64_sys_regs_tlbi_hsh;
471 static htab_t aarch64_sys_regs_sr_hsh;
472 static htab_t aarch64_reg_hsh;
473 static htab_t aarch64_barrier_opt_hsh;
474 static htab_t aarch64_nzcv_hsh;
475 static htab_t aarch64_pldop_hsh;
476 static htab_t aarch64_hint_opt_hsh;
477
478 /* Stuff needed to resolve the label ambiguity
479 As:
480 ...
481 label: <insn>
482 may differ from:
483 ...
484 label:
485 <insn> */
486
487 static symbolS *last_label_seen;
488
489 /* Literal pool structure. Held on a per-section
490 and per-sub-section basis. */
491
492 #define MAX_LITERAL_POOL_SIZE 1024
493 typedef struct literal_expression
494 {
495 expressionS exp;
496 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
497 LITTLENUM_TYPE * bignum;
498 } literal_expression;
499
500 typedef struct literal_pool
501 {
502 literal_expression literals[MAX_LITERAL_POOL_SIZE];
503 unsigned int next_free_entry;
504 unsigned int id;
505 symbolS *symbol;
506 segT section;
507 subsegT sub_section;
508 int size;
509 struct literal_pool *next;
510 } literal_pool;
511
512 /* Pointer to a linked list of literal pools. */
513 static literal_pool *list_of_pools = NULL;
514 \f
515 /* Pure syntax. */
516
517 /* This array holds the chars that always start a comment. If the
518 pre-processor is disabled, these aren't very useful. */
519 const char comment_chars[] = "";
520
521 /* This array holds the chars that only start a comment at the beginning of
522 a line. If the line seems to have the form '# 123 filename'
523 .line and .file directives will appear in the pre-processed output. */
524 /* Note that input_file.c hand checks for '#' at the beginning of the
525 first line of the input file. This is because the compiler outputs
526 #NO_APP at the beginning of its output. */
527 /* Also note that comments like this one will always work. */
528 const char line_comment_chars[] = "#";
529
530 const char line_separator_chars[] = ";";
531
532 /* Chars that can be used to separate mant
533 from exp in floating point numbers. */
534 const char EXP_CHARS[] = "eE";
535
536 /* Chars that mean this number is a floating point constant. */
537 /* As in 0f12.456 */
538 /* or 0d1.2345e12 */
539
540 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
541
542 /* Prefix character that indicates the start of an immediate value. */
543 #define is_immediate_prefix(C) ((C) == '#')
544
545 /* Separator character handling. */
546
547 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
548
/* If *STR points at the character C, step past it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
560
561 #define skip_past_comma(str) skip_past_char (str, ',')
562
563 /* Arithmetic expressions (possibly involving symbols). */
564
565 static bool in_aarch64_get_expression = false;
566
567 /* Third argument to aarch64_get_expression. */
568 #define GE_NO_PREFIX false
569 #define GE_OPT_PREFIX true
570
571 /* Fourth argument to aarch64_get_expression. */
572 #define ALLOW_ABSENT false
573 #define REJECT_ABSENT true
574
575 /* Fifth argument to aarch64_get_expression. */
576 #define NORMAL_RESOLUTION false
577
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily redirect the global GAS input pointer at *STR so the
     generic expression parser consumes our text; restored on all exits.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Flag lets md_operand know a bad sub-expression should be marked
     O_illegal rather than handled elsewhere.  */
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand must be an expression, so failure there
	 is fatal; otherwise only record the error if none is pending.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
651
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder, honouring the configured
     target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
662
663 /* We handle all bad expressions here, so that we can report the faulty
664 instruction in the error message. */
665 void
666 md_operand (expressionS * exp)
667 {
668 if (in_aarch64_get_expression)
669 exp->X_op = O_illegal;
670 }
671
672 /* Immediate values. */
673
/* Record ERROR as a syntax error unless an error is already pending.
   Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), and the earliest one recorded is
   usually the most meaningful, so later (cascading) errors must not
   overwrite it.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
685
/* Similar to first_error, but this function accepts a formatted error
   message.  The message is only recorded when no earlier error is
   pending.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  /* Fixed capacity of the formatting buffer below.  */
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted text is expected to fit; truncation or a
	 formatting failure is an internal error.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
710
/* Register parsing.  */

/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  p = start;
  /* A register name must begin with a letter that is also a valid
     symbol-name starter.  */
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan over the rest of the candidate name: letters, digits, '_'.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up in the register hash table; only advance *CCP on
     a successful match.  */
  reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
749
750 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
751 return FALSE. */
752 static bool
753 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
754 {
755 return (reg_type_masks[type] & (1 << reg->type)) != 0;
756 }
757
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP and WZR all use the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP and XZR all use the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE vector register is only accepted when REG_TYPE allows it
	 and it carries a ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
815
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Convenience wrapper accepting any 32/64-bit GPR, ZR or SP.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
827
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers take a bare element size with no
     element count; WIDTH == 0 encodes "no count given".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or a count of 1.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* When a count was given, the total width must be 64 or 128 bits,
     with 2h and 4b as additional permitted short forms.  */
  /* NOTE(review): the diagnostic below prints WIDTH (the element count)
     for %d and the size letter for %c; the wording arguably swaps
     "element size" and "vector size" — confirm intended phrasing.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
913
914 /* *STR contains an SVE zero/merge predication suffix. Parse it into
915 *PARSED_TYPE and point *STR at the end of the suffix. */
916
917 static bool
918 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
919 {
920 char *ptr = *str;
921
922 /* Skip '/'. */
923 gas_assert (*ptr == '/');
924 ptr++;
925 switch (TOLOWER (*ptr))
926 {
927 case 'z':
928 parsed_type->type = NT_zero;
929 break;
930 case 'm':
931 parsed_type->type = NT_merge;
932 break;
933 default:
934 if (*ptr != '\0' && *ptr != ',')
935 first_error_fmt (_("unexpected character `%c' in predication type"),
936 *ptr);
937 else
938 first_error (_("missing predication type"));
939 return false;
940 }
941 parsed_type->width = 0;
942 *str = ptr + 1;
943 return true;
944 }
945
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete type of the register we found.  */
  type = reg->type;

  /* Vector/SVE registers may be followed by a ".<shape>" suffix, and
     SVE predicate registers additionally by a "/z" or "/m" suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* An optional "[<index>]" element selector.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1082
1083 /* Parse register.
1084
1085 Return the register number on success; return PARSE_FAIL otherwise.
1086
1087 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1088 the register (e.g. NEON double or quad reg when either has been requested).
1089
1090 If this is a NEON vector register with additional type information, fill
1091 in the struct pointed to by VECTYPE (if non-NULL).
1092
1093 This parser does not handle register list. */
1094
1095 static int
1096 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1097 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1098 {
1099 struct vector_type_el atype;
1100 char *str = *ccp;
1101 int reg = parse_typed_reg (&str, type, rtype, &atype,
1102 /*in_reg_list= */ false);
1103
1104 if (reg == PARSE_FAIL)
1105 return PARSE_FAIL;
1106
1107 if (vectype)
1108 *vectype = atype;
1109
1110 *ccp = str;
1111
1112 return reg;
1113 }
1114
1115 static inline bool
1116 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1117 {
1118 return
1119 e1.type == e2.type
1120 && e1.defined == e2.defined
1121 && e1.width == e2.width && e1.index == e2.index;
1122 }
1123
1124 /* This function parses a list of vector registers of type TYPE.
1125 On success, it returns the parsed register list information in the
1126 following encoded format:
1127
1128 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1129 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1130
1131 The information of the register shape and/or index is returned in
1132 *VECTYPE.
1133
1134 It returns PARSE_FAIL if the register list is invalid.
1135
1136 The list contains one to four registers.
1137 Each register can be one of:
1138 <Vt>.<T>[<index>]
1139 <Vt>.<T>
1140 All <T> should be identical.
1141 All <index> should be identical.
1142 There are restrictions on <Vt> numbers which are checked later
1143 (by reg_list_valid_p). */
1144
1145 static int
1146 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1147 struct vector_type_el *vectype)
1148 {
1149 char *str = *ccp;
1150 int nb_regs;
1151 struct vector_type_el typeinfo, typeinfo_first;
1152 int val, val_range;
1153 int in_range;
1154 int ret_val;
1155 int i;
1156 bool error = false;
1157 bool expect_index = false;
1158
1159 if (*str != '{')
1160 {
1161 set_syntax_error (_("expecting {"));
1162 return PARSE_FAIL;
1163 }
1164 str++;
1165
1166 nb_regs = 0;
1167 typeinfo_first.defined = 0;
1168 typeinfo_first.type = NT_invtype;
1169 typeinfo_first.width = -1;
1170 typeinfo_first.index = 0;
1171 ret_val = 0;
1172 val = -1;
1173 val_range = -1;
1174 in_range = 0;
1175 do
1176 {
1177 if (in_range)
1178 {
1179 str++; /* skip over '-' */
1180 val_range = val;
1181 }
1182 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1183 /*in_reg_list= */ true);
1184 if (val == PARSE_FAIL)
1185 {
1186 set_first_syntax_error (_("invalid vector register in list"));
1187 error = true;
1188 continue;
1189 }
1190 /* reject [bhsd]n */
1191 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1192 {
1193 set_first_syntax_error (_("invalid scalar register in list"));
1194 error = true;
1195 continue;
1196 }
1197
1198 if (typeinfo.defined & NTA_HASINDEX)
1199 expect_index = true;
1200
1201 if (in_range)
1202 {
1203 if (val < val_range)
1204 {
1205 set_first_syntax_error
1206 (_("invalid range in vector register list"));
1207 error = true;
1208 }
1209 val_range++;
1210 }
1211 else
1212 {
1213 val_range = val;
1214 if (nb_regs == 0)
1215 typeinfo_first = typeinfo;
1216 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1217 {
1218 set_first_syntax_error
1219 (_("type mismatch in vector register list"));
1220 error = true;
1221 }
1222 }
1223 if (! error)
1224 for (i = val_range; i <= val; i++)
1225 {
1226 ret_val |= i << (5 * nb_regs);
1227 nb_regs++;
1228 }
1229 in_range = 0;
1230 }
1231 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1232
1233 skip_whitespace (str);
1234 if (*str != '}')
1235 {
1236 set_first_syntax_error (_("end of vector register list not found"));
1237 error = true;
1238 }
1239 str++;
1240
1241 skip_whitespace (str);
1242
1243 if (expect_index)
1244 {
1245 if (skip_past_char (&str, '['))
1246 {
1247 expressionS exp;
1248
1249 aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1250 NORMAL_RESOLUTION);
1251 if (exp.X_op != O_constant)
1252 {
1253 set_first_syntax_error (_("constant expression required."));
1254 error = true;
1255 }
1256 if (! skip_past_char (&str, ']'))
1257 error = true;
1258 else
1259 typeinfo_first.index = exp.X_add_number;
1260 }
1261 else
1262 {
1263 set_first_syntax_error (_("expected index"));
1264 error = true;
1265 }
1266 }
1267
1268 if (nb_regs > 4)
1269 {
1270 set_first_syntax_error (_("too many registers in vector register list"));
1271 error = true;
1272 }
1273 else if (nb_regs == 0)
1274 {
1275 set_first_syntax_error (_("empty vector register list"));
1276 error = true;
1277 }
1278
1279 *ccp = str;
1280 if (! error)
1281 *vectype = typeinfo_first;
1282
1283 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1284 }
1285
1286 /* Directives: register aliases. */
1287
1288 static reg_entry *
1289 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1290 {
1291 reg_entry *new;
1292 const char *name;
1293
1294 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1295 {
1296 if (new->builtin)
1297 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1298 str);
1299
1300 /* Only warn about a redefinition if it's not defined as the
1301 same register. */
1302 else if (new->number != number || new->type != type)
1303 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1304
1305 return NULL;
1306 }
1307
1308 name = xstrdup (str);
1309 new = XNEW (reg_entry);
1310
1311 new->name = name;
1312 new->number = number;
1313 new->type = type;
1314 new->builtin = false;
1315
1316 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1317
1318 return new;
1319 }
1320
1321 /* Look for the .req directive. This is of the form:
1322
1323 new_register_name .req existing_register_name
1324
1325 If we find one, or if it looks sufficiently like one that we want to
1326 handle any error here, return TRUE. Otherwise return FALSE. */
1327
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  /* Step past " .req " to the existing register's name.  */
  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only create the upper-case variant if it differs from the
	 alias as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1400
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line; the valid form is
   handled by create_register_alias instead.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1408
1409 /* The .unreq directive deletes an alias which was previously defined
1410 by .req. For example:
1411
1412 my_alias .req r11
1413 .unreq my_alias */
1414
1415 static void
1416 s_unreq (int a ATTRIBUTE_UNUSED)
1417 {
1418 char *name;
1419 char saved_char;
1420
1421 name = input_line_pointer;
1422
1423 while (*input_line_pointer != 0
1424 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1425 ++input_line_pointer;
1426
1427 saved_char = *input_line_pointer;
1428 *input_line_pointer = 0;
1429
1430 if (!*name)
1431 as_bad (_("invalid syntax for .unreq directive"));
1432 else
1433 {
1434 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1435
1436 if (!reg)
1437 as_bad (_("unknown register alias '%s'"), name);
1438 else if (reg->builtin)
1439 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1440 name);
1441 else
1442 {
1443 char *p;
1444 char *nbuf;
1445
1446 str_hash_delete (aarch64_reg_hsh, name);
1447 free ((char *) reg->name);
1448 free (reg);
1449
1450 /* Also locate the all upper case and all lower case versions.
1451 Do not complain if we cannot find one or the other as it
1452 was probably deleted above. */
1453
1454 nbuf = strdup (name);
1455 for (p = nbuf; *p; p++)
1456 *p = TOUPPER (*p);
1457 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1458 if (reg)
1459 {
1460 str_hash_delete (aarch64_reg_hsh, nbuf);
1461 free ((char *) reg->name);
1462 free (reg);
1463 }
1464
1465 for (p = nbuf; *p; p++)
1466 *p = TOLOWER (*p);
1467 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1468 if (reg)
1469 {
1470 str_hash_delete (aarch64_reg_hsh, nbuf);
1471 free ((char *) reg->name);
1472 free (reg);
1473 }
1474
1475 free (nbuf);
1476 }
1477 }
1478
1479 *input_line_pointer = saved_char;
1480 demand_empty_rest_of_line ();
1481 }
1482
1483 /* Directives: Instruction set selection. */
1484
1485 #ifdef OBJ_ELF
1486 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1487 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1488 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1489 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1490
1491 /* Create a new mapping symbol for the transition to STATE. */
1492
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the symbol name for this state; both kinds are untyped
     (BSF_NO_FLAGS) per the AAELF64 note above.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are emitted in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1546
1547 /* We must sometimes convert a region marked as code to data during
1548 code alignment, if an odd number of bytes have to be padded. The
1549 code mapping symbol is pushed to an aligned address. */
1550
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed is also the frag's first mapping
	     symbol; clear that slot as well.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding itself as data, then restore STATE at the first
     byte after the padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1574
1575 static void mapping_state_2 (enum mstate state, int max_chars);
1576
1577 /* Set the mapping state to STATE. Only call this when about to
1578 emit some STATE bytes to the file. */
1579
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
         if we're at the first frag, check it's size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Record the new state and emit the mapping symbol for it.  */
  mapping_state_2 (state, 0);
}
1618
1619 /* Same as mapping_state, but MAX_CHARS bytes have already been
1620 allocated. Put the mapping symbol that far back. */
1621
1622 static void
1623 mapping_state_2 (enum mstate state, int max_chars)
1624 {
1625 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1626
1627 if (!SEG_NORMAL (now_seg))
1628 return;
1629
1630 if (mapstate == state)
1631 /* The mapping symbol has already been emitted.
1632 There is nothing else to do. */
1633 return;
1634
1635 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1636 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1637 }
1638 #else
1639 #define mapping_state(x) /* nothing */
1640 #define mapping_state_2(x, y) /* nothing */
1641 #endif
1642
1643 /* Directives: sectioning and alignment. */
1644
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* Anything placed here counts as data for mapping-symbol purposes.  */
  mapping_state (MAP_DATA);
}
1654
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Request at least 2-byte (2^1) section alignment.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1666
1667 /* Directives: Literal pools. */
1668
1669 static literal_pool *
1670 find_literal_pool (int size)
1671 {
1672 literal_pool *pool;
1673
1674 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1675 {
1676 if (pool->section == now_seg
1677 && pool->sub_section == now_subseg && pool->size == size)
1678 break;
1679 }
1680
1681 return pool;
1682 }
1683
1684 static literal_pool *
1685 find_or_make_literal_pool (int size)
1686 {
1687 /* Next literal pool ID number. */
1688 static unsigned int latest_pool_num = 1;
1689 literal_pool *pool;
1690
1691 pool = find_literal_pool (size);
1692
1693 if (pool == NULL)
1694 {
1695 /* Create a new pool. */
1696 pool = XNEW (literal_pool);
1697 if (!pool)
1698 return NULL;
1699
1700 /* Currently we always put the literal pool in the current text
1701 section. If we were generating "small" model code where we
1702 knew that all code and initialised data was within 1MB then
1703 we could output literals to mergeable, read-only data
1704 sections. */
1705
1706 pool->next_free_entry = 0;
1707 pool->section = now_seg;
1708 pool->sub_section = now_subseg;
1709 pool->size = size;
1710 pool->next = list_of_pools;
1711 pool->symbol = NULL;
1712
1713 /* Add it to the list. */
1714 list_of_pools = pool;
1715 }
1716
1717 /* New pools, and emptied pools, will have a NULL symbol. */
1718 if (pool->symbol == NULL)
1719 {
1720 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1721 &zero_address_frag, 0);
1722 pool->id = latest_pool_num++;
1723 }
1724
1725 /* Done. */
1726 return pool;
1727 }
1728
1729 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1730 Return TRUE on success, otherwise return FALSE. */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on offset and both symbols.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to refer to the pool entry: pool symbol + byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1788
1789 /* Can't use symbol_new here, so have to create a symbol and then at
1790 a later date assign it a value. That's what these functions do. */
1791
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Keep a permanent copy of NAME on the notes obstack.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* It is too late to add symbols once the table has been frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Run the object-format and target hooks for new symbols.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1839
1840
/* Handle .ltorg/.pool: dump every pending literal pool for the current
   (sub)section at the current location, then mark the pools empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Visit the pools of each entry size: 1<<2 .. 1<<4 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Pool contents count as data for mapping symbols.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte in the name makes it one that cannot normally be
	 written in source.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Attach the pool's symbol to the current location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1899
1900 #ifdef OBJ_ELF
1901 /* Forward declarations for functions below, in the MD interface
1902 section. */
1903 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1904 static struct reloc_table_entry * find_reloc_table_entry (char **);
1905
1906 /* Directives: Data. */
1907 /* N.B. the support for relocation suffix in this directive needs to be
1908 implemented properly. */
1909
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* The emitted values count as data for mapping symbols.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for a relocation suffix ("#:name:" or ":name:");
	     suffixes are recognized here but not yet implemented (see
	     the comment above this function).  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1961
1962 /* Mark symbol that it follows a variant PCS convention. */
1963
static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();

  /* Record the marking in the ELF symbol's st_other field.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1984 #endif /* OBJ_ELF */
1985
1986 /* Output a 32-bit word, but mark as an instruction. */
1987
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* emit_expr writes in target byte order; pre-swap on big-endian
	 targets so the instruction word is stored little-endian.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2038
/* Handle .cfi_b_key_frame: mark the current FDE as using the B
   pointer-authentication key.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2046
2047 #ifdef OBJ_ELF
2048 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2049
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix can land on the instruction that must
     follow the directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2062
2063 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2064
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  /* Attach the relocation at the current offset within the frag.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2082
2083 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2084
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix can land on the instruction that must
     follow the directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2097 #endif /* OBJ_ELF */
2098
2099 static void s_aarch64_arch (int);
2100 static void s_aarch64_cpu (int);
2101 static void s_aarch64_arch_extension (int);
2102
2103 /* This table describes all the machine specific pseudo-ops the assembler
2104 has to support. The fields are:
2105 pseudo-op name without dot
2106 function to call to execute this pseudo-op
2107 Integer arg to pass to the function. */
2108
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* For the cons-style entries the argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* Half-precision ('h') and bfloat16 ('b') float constants.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  /* Sentinel.  */
  {0, 0, 0}
};
2136 \f
2137
2138 /* Check whether STR points to a register name followed by a comma or the
2139 end of line; REG_TYPE indicates which register types are checked
2140 against. Return TRUE if STR is such a register name; otherwise return
2141 FALSE. The function does not intend to produce any diagnostics, but since
2142 the register parser aarch64_reg_parse, which is called by this function,
2143 does produce diagnostics, we call clear_error to clear any diagnostics
2144 that may be generated by aarch64_reg_parse.
2145 Also, the function returns FALSE directly if there is any user error
2146 present at the function entry. This prevents the existing diagnostics
2147 state from being spoiled.
2148 The function currently serves parse_constant_immediate and
2149 parse_big_immediate only. */
2150 static bool
2151 reg_name_p (char *str, aarch64_reg_type reg_type)
2152 {
2153 int reg;
2154
2155 /* Prevent the diagnostics state from being spoiled. */
2156 if (error_p ())
2157 return false;
2158
2159 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2160
2161 /* Clear the parsing error that may be set by the reg parser. */
2162 clear_error ();
2163
2164 if (reg == PARSE_FAIL)
2165 return false;
2166
2167 skip_whitespace (str);
2168 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2169 return true;
2170
2171 return false;
2172 }
2173
2174 /* Parser functions used exclusively in instruction operands. */
2175
2176 /* Parse an immediate expression which may not be constant.
2177
2178 To prevent the expression parser from pushing a register name
2179 into the symbol table as an undefined symbol, firstly a check is
2180 done to find out whether STR is a register of type REG_TYPE followed
2181 by a comma or the end of line. Return FALSE if STR is such a string. */
2182
2183 static bool
2184 parse_immediate_expression (char **str, expressionS *exp,
2185 aarch64_reg_type reg_type)
2186 {
2187 if (reg_name_p (*str, reg_type))
2188 {
2189 set_recoverable_error (_("immediate operand required"));
2190 return false;
2191 }
2192
2193 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2194 NORMAL_RESOLUTION);
2195
2196 if (exp->X_op == O_absent)
2197 {
2198 set_fatal_syntax_error (_("missing immediate expression"));
2199 return false;
2200 }
2201
2202 return true;
2203 }
2204
2205 /* Constant immediate-value read function for use in insn parsing.
2206 STR points to the beginning of the immediate (with the optional
2207 leading #); *VAL receives the value. REG_TYPE says which register
2208 names should be treated as registers rather than as symbolic immediates.
2209
2210 Return TRUE on success; otherwise return FALSE. */
2211
2212 static bool
2213 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2214 {
2215 expressionS exp;
2216
2217 if (! parse_immediate_expression (str, &exp, reg_type))
2218 return false;
2219
2220 if (exp.X_op != O_constant)
2221 {
2222 set_syntax_error (_("constant expression required"));
2223 return false;
2224 }
2225
2226 *val = exp.X_add_number;
2227 return true;
2228 }
2229
/* Compress the 32-bit single-precision image IMM into the 8-bit
   immediate encoding: bit 31 becomes bit 7 and bits 25:19 become
   bits 6:0.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7].  */
  uint32_t exp_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  return sign | exp_frac;
}
2236
2237 /* Return TRUE if the single-precision floating-point value encoded in IMM
2238 can be expressed in the AArch64 8-bit signed floating-point format with
2239 3-bit exponent and normalized 4 bits of precision; in other words, the
2240 floating-point value must be expressable as
2241 (+/-) n / 16 * power (2, r)
2242 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2243
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* If a single-precision floating-point value has the following bit
     pattern, it can be expressed in the AArch64 8-bit floating-point
     format:

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  uint32_t exp_pattern;

  /* The 19 low-order fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 29:25 must be the complement of bit 30.  */
  exp_pattern = (imm & 0x40000000) ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == exp_pattern;
}
2269
2270 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2271 as an IEEE float without any loss of precision. Store the value in
2272 *FPWORD if so. */
2273
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double-precision value round-trips through a float exactly when
     it has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are 0 or 1 independently and ~ is the inverse
     of E.  */

  uint32_t top = imm >> 32;
  uint32_t bottom = (uint32_t) imm;
  uint32_t expected;

  /* The low 29 significand bits are discarded; they must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three exponent bits below the top one (E~~~) must be the
     complement of that top bit.  */
  expected = (top & 0x40000000) ? 0x40000000 : 0x38000000;
  if ((top & 0x78000000) != expected)
    return false;

  /* Reject exponents outside the float's range
     (Eeee_eeee == 1111_1111).  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((top & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((top << 3) & 0x3ffffff8) /* 7 e and 20 s bits.  */
	     | (bottom >> 29));		/* 3 S bits.  */
  return true;
}
2317
2318 /* Return true if we should treat OPERAND as a double-precision
2319 floating-point operand rather than a single-precision one. */
2320 static bool
2321 double_precision_operand_p (const aarch64_opnd_info *operand)
2322 {
2323 /* Check for unsuffixed SVE registers, which are allowed
2324 for LDR and STR but not in instructions that require an
2325 immediate. We get better error messages if we arbitrarily
2326 pick one size, parse the immediate normally, and then
2327 report the match failure in the normal way. */
2328 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2329 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2330 }
2331
2332 /* Parse a floating-point immediate. Return TRUE on success and return the
2333 value in *IMMED in the format of IEEE754 single-precision encoding.
2334 *CCP points to the start of the string; DP_P is TRUE when the immediate
2335 is expected to be in double-precision (N.B. this only matters when
2336 hexadecimal representation is involved). REG_TYPE says which register
2337 names should be treated as registers rather than as symbolic immediates.
2338
2339 This routine accepts any IEEE float; it is up to the callers to reject
2340 invalid ones. */
2341
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the double encoding to a float encoding; fail if
	     any range or precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* More than 32 bits cannot be a single-precision encoding.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name here means the immediate was omitted.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  /* Success: hand back the IEEE754 single-precision word and advance
     the caller's string pointer past the consumed text.  */
  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2407
2408 /* Less-generic immediate-value read function with the possibility of loading
2409 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2410 instructions.
2411
2412 To prevent the expression parser from pushing a register name into the
2413 symbol table as an undefined symbol, a check is firstly done to find
2414 out whether STR is a register of type REG_TYPE followed by a comma or
2415 the end of line. Return FALSE if STR is such a register. */
2416
2417 static bool
2418 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2419 {
2420 char *ptr = *str;
2421
2422 if (reg_name_p (ptr, reg_type))
2423 {
2424 set_syntax_error (_("immediate operand required"));
2425 return false;
2426 }
2427
2428 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2429 NORMAL_RESOLUTION);
2430
2431 if (inst.reloc.exp.X_op == O_constant)
2432 *imm = inst.reloc.exp.X_add_number;
2433
2434 *str = ptr;
2435
2436 return true;
2437 }
2438
2439 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2440 if NEED_LIBOPCODES is non-zero, the fixup will need
2441 assistance from the libopcodes. */
2442
2443 static inline void
2444 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2445 const aarch64_opnd_info *operand,
2446 int need_libopcodes_p)
2447 {
2448 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2449 reloc->opnd = operand->type;
2450 if (need_libopcodes_p)
2451 reloc->need_libopcodes_p = 1;
2452 };
2453
2454 /* Return TRUE if the instruction needs to be fixed up later internally by
2455 the GAS; otherwise return FALSE. */
2456
2457 static inline bool
2458 aarch64_gas_internal_fixup_p (void)
2459 {
2460 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2461 }
2462
2463 /* Assign the immediate value to the relevant field in *OPERAND if
2464 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2465 needs an internal fixup in a later stage.
2466 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2467 IMM.VALUE that may get assigned with the constant. */
2468 static inline void
2469 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2470 aarch64_opnd_info *operand,
2471 int addr_off_p,
2472 int need_libopcodes_p,
2473 int skip_p)
2474 {
2475 if (reloc->exp.X_op == O_constant)
2476 {
2477 if (addr_off_p)
2478 operand->addr.offset.imm = reloc->exp.X_add_number;
2479 else
2480 operand->imm.value = reloc->exp.X_add_number;
2481 reloc->type = BFD_RELOC_UNUSED;
2482 }
2483 else
2484 {
2485 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2486 /* Tell libopcodes to ignore this operand or not. This is helpful
2487 when one of the operands needs to be fixed up later but we need
2488 libopcodes to check the other operands. */
2489 operand->skip = skip_p;
2490 }
2491 }
2492
2493 /* Relocation modifiers. Each entry in the table contains the textual
2494 name for the relocation which may be placed before a symbol used as
2495 a load/store offset, or add immediate. It must be surrounded by a
2496 leading and trailing colon, for example:
2497
2498 ldr x0, [x1, #:rello:varsym]
2499 add x0, x1, #:rello:varsym */
2500
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  bfd_reloc_code_real_type adr_type;	  /* Reloc used with ADR.  */
  bfd_reloc_code_real_type adrp_type;	  /* Reloc used with ADRP.  */
  bfd_reloc_code_real_type movw_type;	  /* Reloc used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	  /* Reloc used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	  /* Reloc used with LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc used with LDR (literal).  */
};
2512
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3041
3042 /* Given the address of a pointer pointing to the textual name of a
3043 relocation as may appear in assembler source, attempt to find its
3044 details in reloc_table. The pointer will be updated to the character
3045 after the trailing colon. On failure, NULL will be returned;
3046 otherwise return the reloc_table_entry. */
3047
3048 static struct reloc_table_entry *
3049 find_reloc_table_entry (char **str)
3050 {
3051 unsigned int i;
3052 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3053 {
3054 int length = strlen (reloc_table[i].name);
3055
3056 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3057 && (*str)[length] == ':')
3058 {
3059 *str += (length + 1);
3060 return &reloc_table[i];
3061 }
3062 }
3063
3064 return NULL;
3065 }
3066
3067 /* Returns 0 if the relocation should never be forced,
3068 1 if the relocation must be forced, and -1 if either
3069 result is OK. */
3070
static signed int
aarch64_force_reloc (unsigned int type)
{
  /* Classify the BFD reloc TYPE into one of three policies; callers
     (aarch64_force_relocation) consult generic_force_reloc for the
     "either is OK" (-1) answer.  */
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No preference either way.  */
      return -1;
    }
}
3164
3165 int
3166 aarch64_force_relocation (struct fix *fixp)
3167 {
3168 int res = aarch64_force_reloc (fixp->fx_r_type);
3169
3170 if (res == -1)
3171 return generic_force_reloc (fixp);
3172 return res;
3173 }
3174
/* Mode argument to parse_shift and parse_shifter_operand.  Selects
   which shift/extend modifiers are legal in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3189
3190 /* Parse a <shift> operator on an AArch64 data processing instruction.
3191 Return TRUE on success; otherwise return FALSE. */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic shift-operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only meaningful in the LSL|MSL context.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* Likewise MUL is only valid for the two MUL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Per-mode restrictions on which modifier kinds are acceptable.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A '#' prefix is optional; a register-offset
     shift may omit the amount entirely (closing ']' follows), and
     "MUL VL" never takes one.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
				     NORMAL_RESOLUTION);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only an extend operator may omit its amount, and only when no
	 bare '#' was seen.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  /* Record the modifier and hand the advanced pointer back.  */
  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3363
3364 /* Parse a <shifter_operand> for a data processing instruction:
3365
3366 #<immediate>
3367 #<immediate>, LSL #imm
3368
3369 Validation of immediate operands is deferred to md_apply_fix.
3370
3371 Return TRUE on success; otherwise return FALSE. */
3372
static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-accepting modes can reach a valid parse.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Reject any shifter after a logical immediate: here a *successful*
     parse_shift is the error.  Note the short-circuit — skip_past_comma
     advances P only when a comma is present.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3405
3406 /* Parse a <shifter_operand> for a data processing instruction:
3407
3408 <Rm>
3409 <Rm>, <shift>
3410 #<immediate>
3411 #<immediate>, LSL #imm
3412
3413 where <shift> is handled by parse_shift above, and the last two
3414 cases are handled by the function above.
3415
3416 Validation of immediate operands is deferred to md_apply_fix.
3417
3418 Return TRUE on success; otherwise return FALSE. */
3419
3420 static bool
3421 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3422 enum parse_shift_mode mode)
3423 {
3424 const reg_entry *reg;
3425 aarch64_opnd_qualifier_t qualifier;
3426 enum aarch64_operand_class opd_class
3427 = aarch64_get_operand_class (operand->type);
3428
3429 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3430 if (reg)
3431 {
3432 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3433 {
3434 set_syntax_error (_("unexpected register in the immediate operand"));
3435 return false;
3436 }
3437
3438 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3439 {
3440 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3441 return false;
3442 }
3443
3444 operand->reg.regno = reg->number;
3445 operand->qualifier = qualifier;
3446
3447 /* Accept optional shift operation on register. */
3448 if (! skip_past_comma (str))
3449 return true;
3450
3451 if (! parse_shift (str, operand, mode))
3452 return false;
3453
3454 return true;
3455 }
3456 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3457 {
3458 set_syntax_error
3459 (_("integer register expected in the extended/shifted operand "
3460 "register"));
3461 return false;
3462 }
3463
3464 /* We have a shifted immediate variable. */
3465 return parse_shifter_operand_imm (str, operand, mode);
3466 }
3467
3468 /* Return TRUE on success; return FALSE otherwise. */
3469
3470 static bool
3471 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3472 enum parse_shift_mode mode)
3473 {
3474 char *p = *str;
3475
3476 /* Determine if we have the sequence of characters #: or just :
3477 coming next. If we do, then we check for a :rello: relocation
3478 modifier. If we don't, punt the whole lot to
3479 parse_shifter_operand. */
3480
3481 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3482 {
3483 struct reloc_table_entry *entry;
3484
3485 if (p[0] == '#')
3486 p += 2;
3487 else
3488 p++;
3489 *str = p;
3490
3491 /* Try to parse a relocation. Anything else is an error. */
3492 if (!(entry = find_reloc_table_entry (str)))
3493 {
3494 set_syntax_error (_("unknown relocation modifier"));
3495 return false;
3496 }
3497
3498 if (entry->add_type == 0)
3499 {
3500 set_syntax_error
3501 (_("this relocation modifier is not allowed on this instruction"));
3502 return false;
3503 }
3504
3505 /* Save str before we decompose it. */
3506 p = *str;
3507
3508 /* Next, we parse the expression. */
3509 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3510 REJECT_ABSENT,
3511 aarch64_force_reloc (entry->add_type) == 1))
3512 return false;
3513
3514 /* Record the relocation type (use the ADD variant here). */
3515 inst.reloc.type = entry->add_type;
3516 inst.reloc.pc_rel = entry->pc_rel;
3517
3518 /* If str is empty, we've reached the end, stop here. */
3519 if (**str == '\0')
3520 return true;
3521
3522 /* Otherwise, we have a shifted reloc modifier, so rewind to
3523 recover the variable name and continue parsing for the shifter. */
3524 *str = p;
3525 return parse_shifter_operand_imm (str, operand, mode);
3526 }
3527
3528 return parse_shifter_operand (str, operand, mode);
3529 }
3530
3531 /* Parse all forms of an address expression. Information is written
3532 to *OPERAND and/or inst.reloc.
3533
3534 The A64 instruction set has the following addressing modes:
3535
3536 Offset
3537 [base] // in SIMD ld/st structure
3538 [base{,#0}] // in ld/st exclusive
3539 [base{,#imm}]
3540 [base,Xm{,LSL #imm}]
3541 [base,Xm,SXTX {#imm}]
3542 [base,Wm,(S|U)XTW {#imm}]
3543 Pre-indexed
3544 [base]! // in ldraa/ldrab exclusive
3545 [base,#imm]!
3546 Post-indexed
3547 [base],#imm
3548 [base],Xm // in SIMD ld/st structure
3549 PC-relative (literal)
3550 label
3551 SVE:
3552 [base,#imm,MUL VL]
3553 [base,Zm.D{,LSL #imm}]
3554 [base,Zm.S,(S|U)XTW {#imm}]
3555 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3556 [Zn.S,#imm]
3557 [Zn.D,#imm]
3558 [Zn.S{, Xm}]
3559 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3560 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3561 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3562
3563 (As a convenience, the notation "=immediate" is permitted in conjunction
3564 with the pc-relative literal load instructions to automatically place an
3565 immediate value or symbolic address in a nearby literal pool and generate
3566 a hidden label which references it.)
3567
3568 Upon a successful parsing, the address structure in *OPERAND will be
3569 filled in the following way:
3570
3571 .base_regno = <base>
3572 .offset.is_reg // 1 if the offset is a register
3573 .offset.imm = <imm>
3574 .offset.regno = <Rm>
3575
3576 For different addressing modes defined in the A64 ISA:
3577
3578 Offset
3579 .pcrel=0; .preind=1; .postind=0; .writeback=0
3580 Pre-indexed
3581 .pcrel=0; .preind=1; .postind=0; .writeback=1
3582 Post-indexed
3583 .pcrel=0; .preind=0; .postind=1; .writeback=1
3584 PC-relative (literal)
3585 .pcrel=1; .preind=1; .postind=0; .writeback=0
3586
3587 The shift/extension information, if any, will be stored in .shifter.
3588 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3589 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3590 corresponding register.
3591
3592 BASE_TYPE says which types of base register should be accepted and
3593 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3594 is the type of shifter that is allowed for immediate offsets,
3595 or SHIFTED_NONE if none.
3596
3597 In all other respects, it is the caller's responsibility to check
3598 for addressing modes not supported by the instruction, and to set
3599 inst.reloc.type. */
3600
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;

  /* No leading '[': the operand is PC-relative — a label, an
     "=immediate" literal-pool request, or a relocation-modified
     expression.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Choose the reloc variant matching the instruction: ADR uses
	     the adr_type field, PC-relative loads the ld_literal_type.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  /* A zero entry means the modifier exists but is not usable
	     with this instruction class.  */
	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (entry->add_type) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* The base register; its permitted type (GPR+SP or SVE Z) is chosen
     by the caller via BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* 64-bit offset forms: a W offset is invalid, and (except
		 for the SVE2 vector-plus-scalar case) the offset must be
		 the same width as the base.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* SXTW/UXTW forms require a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->add_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  /* After ']': '!' selects pre-indexed writeback, a comma selects
     post-indexed addressing.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Synthesize the implicit #0 offset.  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3904
3905 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3906 on success. */
3907 static bool
3908 parse_address (char **str, aarch64_opnd_info *operand)
3909 {
3910 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3911 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3912 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3913 }
3914
3915 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3916 The arguments have the same meaning as for parse_address_main.
3917 Return TRUE on success. */
3918 static bool
3919 parse_sve_address (char **str, aarch64_opnd_info *operand,
3920 aarch64_opnd_qualifier_t *base_qualifier,
3921 aarch64_opnd_qualifier_t *offset_qualifier)
3922 {
3923 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3924 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3925 SHIFTED_MUL_VL);
3926 }
3927
3928 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3929 Return TRUE on success; otherwise return FALSE. */
3930 static bool
3931 parse_half (char **str, int *internal_fixup_p)
3932 {
3933 char *p = *str;
3934
3935 skip_past_char (&p, '#');
3936
3937 gas_assert (internal_fixup_p);
3938 *internal_fixup_p = 0;
3939
3940 if (*p == ':')
3941 {
3942 struct reloc_table_entry *entry;
3943
3944 /* Try to parse a relocation. Anything else is an error. */
3945 ++p;
3946
3947 if (!(entry = find_reloc_table_entry (&p)))
3948 {
3949 set_syntax_error (_("unknown relocation modifier"));
3950 return false;
3951 }
3952
3953 if (entry->movw_type == 0)
3954 {
3955 set_syntax_error
3956 (_("this relocation modifier is not allowed on this instruction"));
3957 return false;
3958 }
3959
3960 inst.reloc.type = entry->movw_type;
3961 }
3962 else
3963 *internal_fixup_p = 1;
3964
3965 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3966 aarch64_force_reloc (inst.reloc.type) == 1))
3967 return false;
3968
3969 *str = p;
3970 return true;
3971 }
3972
3973 /* Parse an operand for an ADRP instruction:
3974 ADRP <Xd>, <label>
3975 Return TRUE on success; otherwise return FALSE. */
3976
3977 static bool
3978 parse_adrp (char **str)
3979 {
3980 char *p;
3981
3982 p = *str;
3983 if (*p == ':')
3984 {
3985 struct reloc_table_entry *entry;
3986
3987 /* Try to parse a relocation. Anything else is an error. */
3988 ++p;
3989 if (!(entry = find_reloc_table_entry (&p)))
3990 {
3991 set_syntax_error (_("unknown relocation modifier"));
3992 return false;
3993 }
3994
3995 if (entry->adrp_type == 0)
3996 {
3997 set_syntax_error
3998 (_("this relocation modifier is not allowed on this instruction"));
3999 return false;
4000 }
4001
4002 inst.reloc.type = entry->adrp_type;
4003 }
4004 else
4005 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4006
4007 inst.reloc.pc_rel = 1;
4008 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4009 aarch64_force_reloc (inst.reloc.type) == 1))
4010 return false;
4011 *str = p;
4012 return true;
4013 }
4014
4015 /* Miscellaneous. */
4016
4017 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4018 of SIZE tokens in which index I gives the token for field value I,
4019 or is null if field value I is invalid. REG_TYPE says which register
4020 names should be treated as registers rather than as symbolic immediates.
4021
4022 Return true on success, moving *STR past the operand and storing the
4023 field value in *VAL. */
4024
4025 static int
4026 parse_enum_string (char **str, int64_t *val, const char *const *array,
4027 size_t size, aarch64_reg_type reg_type)
4028 {
4029 expressionS exp;
4030 char *p, *q;
4031 size_t i;
4032
4033 /* Match C-like tokens. */
4034 p = q = *str;
4035 while (ISALNUM (*q))
4036 q++;
4037
4038 for (i = 0; i < size; ++i)
4039 if (array[i]
4040 && strncasecmp (array[i], p, q - p) == 0
4041 && array[i][q - p] == 0)
4042 {
4043 *val = i;
4044 *str = q;
4045 return true;
4046 }
4047
4048 if (!parse_immediate_expression (&p, &exp, reg_type))
4049 return false;
4050
4051 if (exp.X_op == O_constant
4052 && (uint64_t) exp.X_add_number < size)
4053 {
4054 *val = exp.X_add_number;
4055 *str = p;
4056 return true;
4057 }
4058
4059 /* Use the default error for this operand. */
4060 return false;
4061 }
4062
4063 /* Parse an option for a preload instruction. Returns the encoding for the
4064 option, or PARSE_FAIL. */
4065
4066 static int
4067 parse_pldop (char **str)
4068 {
4069 char *p, *q;
4070 const struct aarch64_name_value_pair *o;
4071
4072 p = q = *str;
4073 while (ISALNUM (*q))
4074 q++;
4075
4076 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4077 if (!o)
4078 return PARSE_FAIL;
4079
4080 *str = q;
4081 return o->value;
4082 }
4083
4084 /* Parse an option for a barrier instruction. Returns the encoding for the
4085 option, or PARSE_FAIL. */
4086
4087 static int
4088 parse_barrier (char **str)
4089 {
4090 char *p, *q;
4091 const struct aarch64_name_value_pair *o;
4092
4093 p = q = *str;
4094 while (ISALPHA (*q))
4095 q++;
4096
4097 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4098 if (!o)
4099 return PARSE_FAIL;
4100
4101 *str = q;
4102 return o->value;
4103 }
4104
4105 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4106 return 0 if successful. Otherwise return PARSE_FAIL. */
4107
4108 static int
4109 parse_barrier_psb (char **str,
4110 const struct aarch64_name_value_pair ** hint_opt)
4111 {
4112 char *p, *q;
4113 const struct aarch64_name_value_pair *o;
4114
4115 p = q = *str;
4116 while (ISALPHA (*q))
4117 q++;
4118
4119 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4120 if (!o)
4121 {
4122 set_fatal_syntax_error
4123 ( _("unknown or missing option to PSB/TSB"));
4124 return PARSE_FAIL;
4125 }
4126
4127 if (o->value != 0x11)
4128 {
4129 /* PSB only accepts option name 'CSYNC'. */
4130 set_syntax_error
4131 (_("the specified option is not accepted for PSB/TSB"));
4132 return PARSE_FAIL;
4133 }
4134
4135 *str = q;
4136 *hint_opt = o;
4137 return 0;
4138 }
4139
4140 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4141 return 0 if successful. Otherwise return PARSE_FAIL. */
4142
4143 static int
4144 parse_bti_operand (char **str,
4145 const struct aarch64_name_value_pair ** hint_opt)
4146 {
4147 char *p, *q;
4148 const struct aarch64_name_value_pair *o;
4149
4150 p = q = *str;
4151 while (ISALPHA (*q))
4152 q++;
4153
4154 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4155 if (!o)
4156 {
4157 set_fatal_syntax_error
4158 ( _("unknown option to BTI"));
4159 return PARSE_FAIL;
4160 }
4161
4162 switch (o->value)
4163 {
4164 /* Valid BTI operands. */
4165 case HINT_OPD_C:
4166 case HINT_OPD_J:
4167 case HINT_OPD_JC:
4168 break;
4169
4170 default:
4171 set_syntax_error
4172 (_("unknown option to BTI"));
4173 return PARSE_FAIL;
4174 }
4175
4176 *str = q;
4177 *hint_opt = o;
4178 return 0;
4179 }
4180
4181 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4182 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4183 on failure. Format:
4184
4185 REG_TYPE.QUALIFIER
4186
4187 Side effect: Update STR with current parse position of success.
4188 */
4189
static const reg_entry *
parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
		     aarch64_opnd_qualifier_t *qualifier)
{
  char *q;

  /* The register name must parse and have exactly the requested type
     (e.g. REG_TYPE_ZA, REG_TYPE_ZAH, REG_TYPE_ZAV).  */
  reg_entry *reg = parse_reg (str);
  if (reg != NULL && reg->type == reg_type)
    {
      /* The element size follows a mandatory '.' separator.  */
      if (!skip_past_char (str, '.'))
	{
	  set_syntax_error (_("missing ZA tile element size separator"));
	  return NULL;
	}

      q = *str;
      /* Map the single [bhsdq] size letter (case-insensitive) onto the
	 corresponding element qualifier.  */
      switch (TOLOWER (*q))
	{
	case 'b':
	  *qualifier = AARCH64_OPND_QLF_S_B;
	  break;
	case 'h':
	  *qualifier = AARCH64_OPND_QLF_S_H;
	  break;
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	case 'q':
	  *qualifier = AARCH64_OPND_QLF_S_Q;
	  break;
	default:
	  /* Unknown size letter.  NOTE(review): on this failure path
	     *str has already been advanced past the register name and
	     the '.'; callers such as parse_sme_za_hv_tiles_operand cope
	     by parsing on a private copy of the input pointer.  */
	  return NULL;
	}
      q++;

      *str = q;
      return reg;
    }

  return NULL;
}
4234
4235 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4236 Function return tile QUALIFIER on success.
4237
4238 Tiles are in example format: za[0-9]\.[bhsd]
4239
4240 Function returns <ZAda> register number or PARSE_FAIL.
4241 */
4242 static int
4243 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4244 {
4245 int regno;
4246 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4247
4248 if (reg == NULL)
4249 return PARSE_FAIL;
4250 regno = reg->number;
4251
4252 switch (*qualifier)
4253 {
4254 case AARCH64_OPND_QLF_S_B:
4255 if (regno != 0x00)
4256 {
4257 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4258 return PARSE_FAIL;
4259 }
4260 break;
4261 case AARCH64_OPND_QLF_S_H:
4262 if (regno > 0x01)
4263 {
4264 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4265 return PARSE_FAIL;
4266 }
4267 break;
4268 case AARCH64_OPND_QLF_S_S:
4269 if (regno > 0x03)
4270 {
4271 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4272 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4273 return PARSE_FAIL;
4274 }
4275 break;
4276 case AARCH64_OPND_QLF_S_D:
4277 if (regno > 0x07)
4278 {
4279 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4280 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4281 return PARSE_FAIL;
4282 }
4283 break;
4284 default:
4285 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4286 return PARSE_FAIL;
4287 }
4288
4289 return regno;
4290 }
4291
4292 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4293
4294 #<imm>
4295 <imm>
4296
4297 Function return TRUE if immediate was found, or FALSE.
4298 */
4299 static bool
4300 parse_sme_immediate (char **str, int64_t *imm)
4301 {
4302 int64_t val;
4303 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4304 return false;
4305
4306 *imm = val;
4307 return true;
4308 }
4309
4310 /* Parse index with vector select register and immediate:
4311
4312 [<Wv>, <imm>]
4313 [<Wv>, #<imm>]
4314 where <Wv> is in W12-W15 range and # is optional for immediate.
4315
4316 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4317 is set to true.
4318
4319 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4320 IMM output.
4321 */
4322 static bool
4323 parse_sme_za_hv_tiles_operand_index (char **str,
4324 int *vector_select_register,
4325 int64_t *imm)
4326 {
4327 const reg_entry *reg;
4328
4329 if (!skip_past_char (str, '['))
4330 {
4331 set_syntax_error (_("expected '['"));
4332 return false;
4333 }
4334
4335 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4336 reg = parse_reg (str);
4337 if (reg == NULL || reg->type != REG_TYPE_R_32
4338 || reg->number < 12 || reg->number > 15)
4339 {
4340 set_syntax_error (_("expected vector select register W12-W15"));
4341 return false;
4342 }
4343 *vector_select_register = reg->number;
4344
4345 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4346 {
4347 set_syntax_error (_("expected ','"));
4348 return false;
4349 }
4350
4351 if (!parse_sme_immediate (str, imm))
4352 {
4353 set_syntax_error (_("index offset immediate expected"));
4354 return false;
4355 }
4356
4357 if (!skip_past_char (str, ']'))
4358 {
4359 set_syntax_error (_("expected ']'"));
4360 return false;
4361 }
4362
4363 return true;
4364 }
4365
4366 /* Parse SME ZA horizontal or vertical vector access to tiles.
4367 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4368 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4369 contains <Wv> select register and corresponding optional IMMEDIATE.
4370 In addition QUALIFIER is extracted.
4371
4372 Field format examples:
4373
4374 ZA0<HV>.B[<Wv>, #<imm>]
4375 <ZAn><HV>.H[<Wv>, #<imm>]
4376 <ZAn><HV>.S[<Wv>, #<imm>]
4377 <ZAn><HV>.D[<Wv>, #<imm>]
4378 <ZAn><HV>.Q[<Wv>, #<imm>]
4379
4380 Function returns <ZAda> register number or PARSE_FAIL.
4381 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try the horizontal (ZAnH) and vertical (ZAnV) name spaces on
     separate copies of the input pointer; only the successful parse's
     position is committed back to *str.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* Both the highest usable tile number and the slice-index range
     depend on the element size: wider elements mean more tiles but
     fewer addressable slices per tile.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* Parse the mandatory "[<Wv>, <imm>]" index that follows.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4462
4463
4464 static int
4465 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4466 enum sme_hv_slice *slice_indicator,
4467 int *vector_select_register,
4468 int *imm,
4469 aarch64_opnd_qualifier_t *qualifier)
4470 {
4471 int regno;
4472
4473 if (!skip_past_char (str, '{'))
4474 {
4475 set_syntax_error (_("expected '{'"));
4476 return PARSE_FAIL;
4477 }
4478
4479 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4480 vector_select_register, imm,
4481 qualifier);
4482
4483 if (regno == PARSE_FAIL)
4484 return PARSE_FAIL;
4485
4486 if (!skip_past_char (str, '}'))
4487 {
4488 set_syntax_error (_("expected '}'"));
4489 return PARSE_FAIL;
4490 }
4491
4492 return regno;
4493 }
4494
4495 /* Parse list of up to eight 64-bit element tile names separated by commas in
4496 SME's ZERO instruction:
4497
4498 ZERO { <mask> }
4499
4500 Function returns <mask>:
4501
4502 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4503 */
4504 static int
4505 parse_sme_zero_mask(char **str)
4506 {
4507 char *q;
4508 int mask;
4509 aarch64_opnd_qualifier_t qualifier;
4510
4511 mask = 0x00;
4512 q = *str;
4513 do
4514 {
4515 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
4516 if (reg)
4517 {
4518 int regno = reg->number;
4519 if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
4520 {
4521 /* { ZA0.B } is assembled as all-ones immediate. */
4522 mask = 0xff;
4523 }
4524 else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
4525 mask |= 0x55 << regno;
4526 else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
4527 mask |= 0x11 << regno;
4528 else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
4529 mask |= 0x01 << regno;
4530 else
4531 {
4532 set_syntax_error (_("wrong ZA tile element format"));
4533 return PARSE_FAIL;
4534 }
4535 continue;
4536 }
4537 else if (strncasecmp (q, "za", 2) == 0
4538 && !ISALNUM (q[2]))
4539 {
4540 /* { ZA } is assembled as all-ones immediate. */
4541 mask = 0xff;
4542 q += 2;
4543 continue;
4544 }
4545 else
4546 {
4547 set_syntax_error (_("wrong ZA tile element format"));
4548 return PARSE_FAIL;
4549 }
4550 }
4551 while (skip_past_char (&q, ','));
4552
4553 *str = q;
4554 return mask;
4555 }
4556
4557 /* Wraps in curly braces <mask> operand ZERO instruction:
4558
4559 ZERO { <mask> }
4560
4561 Function returns value of <mask> bit-field.
4562 */
4563 static int
4564 parse_sme_list_of_64bit_tiles (char **str)
4565 {
4566 int regno;
4567
4568 if (!skip_past_char (str, '{'))
4569 {
4570 set_syntax_error (_("expected '{'"));
4571 return PARSE_FAIL;
4572 }
4573
4574 /* Empty <mask> list is an all-zeros immediate. */
4575 if (!skip_past_char (str, '}'))
4576 {
4577 regno = parse_sme_zero_mask (str);
4578 if (regno == PARSE_FAIL)
4579 return PARSE_FAIL;
4580
4581 if (!skip_past_char (str, '}'))
4582 {
4583 set_syntax_error (_("expected '}'"));
4584 return PARSE_FAIL;
4585 }
4586 }
4587 else
4588 regno = 0x00;
4589
4590 return regno;
4591 }
4592
4593 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4594 Operand format:
4595
4596 ZA[<Wv>, <imm>]
4597 ZA[<Wv>, #<imm>]
4598
4599 Function returns <Wv> or PARSE_FAIL.
4600 */
4601 static int
4602 parse_sme_za_array (char **str, int *imm)
4603 {
4604 char *p, *q;
4605 int regno;
4606 int64_t imm_value;
4607
4608 p = q = *str;
4609 while (ISALPHA (*q))
4610 q++;
4611
4612 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4613 {
4614 set_syntax_error (_("expected ZA array"));
4615 return PARSE_FAIL;
4616 }
4617
4618 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4619 return PARSE_FAIL;
4620
4621 if (imm_value < 0 || imm_value > 15)
4622 {
4623 set_syntax_error (_("offset out of range"));
4624 return PARSE_FAIL;
4625 }
4626
4627 *imm = imm_value;
4628 *str = q;
4629 return regno;
4630 }
4631
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-NULL, it receives the register's flag bits (0 for an
   implementation defined register).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the lower-cased name into BUF, stopping at the first character
     that cannot be part of a register name; extra characters beyond the
     buffer capacity are scanned but not stored.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the encoding used throughout:
	     op0[15:14] op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known register: diagnose names not supported by the selected
	 processor or deprecated names, but still return the encoding so
	 assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4705
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  On success *STR is advanced past the name.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy the lower-cased name into BUF, stopping at the first character
     that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose unsupported and deprecated names but still return the entry
     so assembly can continue.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4743 \f
/* Operand-parsing helper macros.  Each assumes the enclosing function has
   a local `char *str' parse cursor and a `failure' label to branch to on
   a parse error.  */

/* Consume the single character CHR from the input, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
    } while (0)

/* Parse a register of type REGTYPE into VAL (its type into RTYPE), or
   fail with the default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register into REG/QUALIFIER, check it against
   REG_TYPE and record it in INFO, or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and fail fatally if it is outside
   the inclusive range [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY into its index VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4795 \f
/* Encode IMM as the 12-bit immediate field (bits [21:10]) of an
   add/sub-immediate instruction.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t field = imm << 10;
  return field;
}
4802
/* Encode CNT as the shift-amount field (bit 22) of an add/sub-immediate
   instruction.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t field = cnt << 22;
  return field;
}
4809
4810
/* Encode IMM as the immediate field of an ADR instruction: the low two
   bits become immlo (bits [30:29]) and bits [20:2] become immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* [1:0]  -> [30:29].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5].  */
  return immlo | immhi;
}
4818
/* Encode IMM as the 16-bit immediate field (bits [20:5]) of a move-wide
   instruction.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t field = imm << 5;
  return field;
}
4825
/* Encode OFS as the 26-bit offset field (bits [25:0]) of an unconditional
   branch; bits outside the field are discarded.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t field_mask = (1u << 26) - 1;
  return ofs & field_mask;
}
4832
/* Encode OFS as the 19-bit offset field (bits [23:5]) of a conditional
   branch or compare & branch; bits outside the field are discarded.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t field_mask = (1u << 19) - 1;
  return (ofs & field_mask) << 5;
}
4839
/* Encode OFS as the 19-bit offset field (bits [23:5]) of a load-literal
   instruction; bits outside the field are discarded.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t field_mask = (1u << 19) - 1;
  return (ofs & field_mask) << 5;
}
4846
/* Encode OFS as the 14-bit offset field (bits [18:5]) of a test & branch
   instruction; bits outside the field are discarded.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t field_mask = (1u << 14) - 1;
  return (ofs & field_mask) << 5;
}
4853
/* Encode IMM as the 16-bit immediate field (bits [20:5]) of
   svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t field = imm << 5;
  return field;
}
4860
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the op bit
   (bit 30) of OPCODE.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4867
/* Reencode a MOVZ/MOVN-class OPCODE as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode | opc_bit;
}
4873
/* Reencode a MOVZ/MOVN-class OPCODE as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode & ~opc_bit;
}
4879
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

/* Create and return a fix of SIZE bytes at offset WHERE in FRAG for the
   expression EXP, with PC-relativity PC_REL and relocation code RELOC.  */
static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple expressions can be fixed up as-is.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Anything more complex is wrapped in an expression symbol in the
	 expr_section and the fix is made against that symbol.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
4915 \f
/* Diagnostics on operands errors.  */

/* By default, output a verbose error message, including "did you mean"
   suggestions.  Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
4921
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind, so the entries must stay in exactly
   the enum's declaration order.  The UNTIED entries were previously
   missing, making DEBUG_TRACE index past the end of the array (or print
   the wrong name) for the higher-valued kinds.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4937
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the enum value; the asserts verify at run time
     that the enum declaration order matches the intended ranking.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4958
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   Note: returns a pointer to a static buffer, so the result is only
   valid until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4987
4988 static void
4989 reset_aarch64_instruction (aarch64_instruction *instruction)
4990 {
4991 memset (instruction, '\0', sizeof (aarch64_instruction));
4992 instruction->reloc.type = BFD_RELOC_UNUSED;
4993 }
4994
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error: the opcode template being matched when the
   error was found, plus the detail of the mismatch.  Records form a
   singly-linked list, at most one node per opcode template.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error relates to.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of the list of operand error records collected for the
   current assembly line.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5014
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5026
/* Initialize the data structure that stores the operand mismatch
   information on assembling one line of the assembly code.  Any records
   left over from the previous line are recycled onto the free list
   rather than deallocated.  */
static void
init_operand_error_report (void)
{
  if (operand_error_report.head != NULL)
    {
      gas_assert (operand_error_report.tail != NULL);
      /* Splice the whole old list onto the front of the free list.  */
      operand_error_report.tail->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = operand_error_report.head;
      operand_error_report.head = NULL;
      operand_error_report.tail = NULL;
      return;
    }
  gas_assert (operand_error_report.tail == NULL);
}
5043
5044 /* Return TRUE if some operand error has been recorded during the
5045 parsing of the current assembly line using the opcode *OPCODE;
5046 otherwise return FALSE. */
5047 static inline bool
5048 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5049 {
5050 operand_error_record *record = operand_error_report.head;
5051 return record && record->opcode == opcode;
5052 }
5053
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* Records are always inserted at the head, so if a record for OPCODE
     already exists it is the head node and RECORD aliases it here.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Initialize the new record, or overwrite the retained one.  */
  record->detail = new_record->detail;
}
5105
5106 static inline void
5107 record_operand_error_info (const aarch64_opcode *opcode,
5108 aarch64_operand_error *error_info)
5109 {
5110 operand_error_record record;
5111 record.opcode = opcode;
5112 record.detail = *error_info;
5113 add_operand_error_record (&record);
5114 }
5115
5116 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5117 error message *ERROR, for operand IDX (count from 0). */
5118
5119 static void
5120 record_operand_error (const aarch64_opcode *opcode, int idx,
5121 enum aarch64_operand_error_kind kind,
5122 const char* error)
5123 {
5124 aarch64_operand_error info;
5125 memset(&info, 0, sizeof (info));
5126 info.index = idx;
5127 info.kind = kind;
5128 info.error = error;
5129 info.non_fatal = false;
5130 record_operand_error_info (opcode, &info);
5131 }
5132
5133 static void
5134 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5135 enum aarch64_operand_error_kind kind,
5136 const char* error, const int *extra_data)
5137 {
5138 aarch64_operand_error info;
5139 info.index = idx;
5140 info.kind = kind;
5141 info.error = error;
5142 info.data[0] = extra_data[0];
5143 info.data[1] = extra_data[1];
5144 info.data[2] = extra_data[2];
5145 info.non_fatal = false;
5146 record_operand_error_info (opcode, &info);
5147 }
5148
5149 static void
5150 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5151 const char* error, int lower_bound,
5152 int upper_bound)
5153 {
5154 int data[3] = {lower_bound, upper_bound, 0};
5155 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5156 error, data);
5157 }
5158
/* Remove the operand error record for *OPCODE, if any, returning its
   node to the free list.  Only the head can match, since records are
   inserted at the head.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      /* Unlink the head and put it on the free list.  */
      operand_error_report.head = record->next;
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
5177
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
   "Best" means the sequence agreeing with the largest number of the
   instruction's operand qualifiers.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 The first all-NIL sequence marks the end of the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5227
5228 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5229 corresponding operands in *INSTR. */
5230
5231 static inline void
5232 assign_qualifier_sequence (aarch64_inst *instr,
5233 const aarch64_opnd_qualifier_t *qualifiers)
5234 {
5235 int i = 0;
5236 int num_opnds = aarch64_num_of_operands (instr->opcode);
5237 gas_assert (num_opnds);
5238 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5239 instr->operands[i].qualifier = *qualifiers;
5240 }
5241
/* Append the textual form of OPNDS (interpreted per OPCODE) to the
   NUL-terminated string BUF, comma-separated, for the diagnosis purpose.
   BUF must be large enough; no bounds checking is performed here.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cpu_variant);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
5275
/* Send to stderr a printf-style FORMAT string as information, prefixed
   with the current file name and line number (when known) and "Info:".  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5299
/* Output one operand error record *RECORD for the assembly line STR,
   via as_warn for non-fatal errors and as_bad otherwise.  For
   AARCH64_OPDE_INVALID_VARIANT a "did you mean" suggestion and the other
   valid variants are printed as well (unless -mno-verbose-error).  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* idx may be -1 (unknown operand); map it to AARCH64_OPND_NIL.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing is expected to succeed, but encoding
	     must fail again (it is the reason we are here).  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.   */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
                 "as operand 1 -- `%s'"),
               detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
               detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5482
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5580 \f
/* Write the 32-bit instruction INSN to BUF in little-endian byte order,
   independent of the host's endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5591
/* Read a 32-bit instruction from BUF, stored in little-endian byte
   order, independent of the host's endianness.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5601
/* Emit the assembled instruction in inst.base.value into the current
   frag, creating a relocation fix if inst.reloc requires one.  NEW_INST,
   if non-NULL, is attached to the fix for later use in md_apply_fix.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Mark the frag as containing AArch64 instructions.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags instead of a
	     BFD relocation.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5635
/* Link together opcodes of the same name.  Each node refers to one
   opcode template; chains are built per mnemonic and stored in
   aarch64_ops_hsh.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One template with this mnemonic.  */
  struct templates *next;	/* Next template sharing the mnemonic.  */
};

typedef struct templates templates;
5645
5646 static templates *
5647 lookup_mnemonic (const char *start, int len)
5648 {
5649 templates *templ = NULL;
5650
5651 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5652 return templ;
5653 }
5654
5655 /* Subroutine of md_assemble, responsible for looking up the primary
5656 opcode from the mnemonic the user wrote. STR points to the
5657 beginning of the mnemonic. */
5658
5659 static templates *
5660 opcode_lookup (char **str)
5661 {
5662 char *end, *base, *dot;
5663 const aarch64_cond *cond;
5664 char condname[16];
5665 int len;
5666
5667 /* Scan up to the end of the mnemonic, which must end in white space,
5668 '.', or end of string. */
5669 dot = 0;
5670 for (base = end = *str; is_part_of_name(*end); end++)
5671 if (*end == '.' && !dot)
5672 dot = end;
5673
5674 if (end == base || dot == base)
5675 return 0;
5676
5677 inst.cond = COND_ALWAYS;
5678
5679 /* Handle a possible condition. */
5680 if (dot)
5681 {
5682 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5683 if (cond)
5684 {
5685 inst.cond = cond->value;
5686 *str = end;
5687 }
5688 else
5689 {
5690 *str = dot;
5691 return 0;
5692 }
5693 len = dot - base;
5694 }
5695 else
5696 {
5697 *str = end;
5698 len = end - base;
5699 }
5700
5701 if (inst.cond == COND_ALWAYS)
5702 {
5703 /* Look for unaffixed mnemonic. */
5704 return lookup_mnemonic (base, len);
5705 }
5706 else if (len <= 13)
5707 {
5708 /* append ".c" to mnemonic if conditional */
5709 memcpy (condname, base, len);
5710 memcpy (condname + len, ".c", 2);
5711 base = condname;
5712 len += 2;
5713 return lookup_mnemonic (base, len);
5714 }
5715
5716 return NULL;
5717 }
5718
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL and
   records an error via first_error on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest vector-register qualifier for each element type; the final
     qualifier is computed as an offset from these below, so this table
     relies on the ordering of the qualifier enum.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* NT_zero and NT_merge map directly to the predicate qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  The S_B..S_Q qualifiers follow the
	 same order as vector_el_type, so the type is a direct offset.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Only 4-, 8- and 16-byte register arrangements
	 are representable.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5793
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The default encoding for the omitted operand is supplied by the
     opcode table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (lane) operands: the default is the register
       number of the lane operand.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is an immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scaled pattern gets the default pattern with an
       implicit "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier and hint operands default to a table entry selected by
       the opcode's default value.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5892
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  On success the
   implicit LSL amount of operand 1 is set from the relocation's
   16-bit group (G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48).  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A W qualifier on operand 0 selects the 32-bit form, which cannot
     use the G2/G3 groups below.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Some relocation types are rejected for MOVK specifically.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* Group 0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* Group 1 relocations: bits [31:16], LSL #16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* Group 2 relocations: bits [47:32], LSL #32; 64-bit form only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* Group 3 relocations: bits [63:48], LSL #48; 64-bit form only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5994
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; assert (and return -1) for any other value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 lookup table indexed by SIZE - 1; (unsigned char) -1 marks
     sizes that are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16; the original lower bound was
     unchecked and would have read ls[-1].  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6010
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
   The real type is selected by the pseudo type (table row) and by the
   log2 of the transfer size implied by operand 1's qualifier (table
   column).  Returns BFD_RELOC_AARCH64_NONE on a size mismatch.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: one per pseudo reloc type, in the same order as the pseudo
     types are defined (see the index computation at the end).
     Columns: access size 1, 2, 4, 8, 16 bytes; the TLS rows have no
     128-bit variant.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 has no qualifier yet, infer it from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS variants stop at 64-bit accesses (see the table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6098
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO encodes the count minus one in its low 2 bits, followed by
   one 5-bit register number per register, lowest-numbered field first.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = (reginfo & 0x3) + 1;
  uint32_t stride = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      /* Register numbers wrap around modulo 32.  */
      expected = (expected + stride) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6129
6130 /* Generic instruction operand parser. This does no encoding and no
6131 semantic validation; it merely squirrels values away in the inst
6132 structure. Returns TRUE or FALSE depending on whether the
6133 specified grammar matched. */
6134
6135 static bool
6136 parse_operands (char *str, const aarch64_opcode *opcode)
6137 {
6138 int i;
6139 char *backtrack_pos = 0;
6140 const enum aarch64_opnd *operands = opcode->operands;
6141 aarch64_reg_type imm_reg_type;
6142
6143 clear_error ();
6144 skip_whitespace (str);
6145
6146 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6147 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6148 else
6149 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6150
6151 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6152 {
6153 int64_t val;
6154 const reg_entry *reg;
6155 int comma_skipped_p = 0;
6156 aarch64_reg_type rtype;
6157 struct vector_type_el vectype;
6158 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6159 aarch64_opnd_info *info = &inst.base.operands[i];
6160 aarch64_reg_type reg_type;
6161
6162 DEBUG_TRACE ("parse operand %d", i);
6163
6164 /* Assign the operand code. */
6165 info->type = operands[i];
6166
6167 if (optional_operand_p (opcode, i))
6168 {
6169 /* Remember where we are in case we need to backtrack. */
6170 gas_assert (!backtrack_pos);
6171 backtrack_pos = str;
6172 }
6173
6174 /* Expect comma between operands; the backtrack mechanism will take
6175 care of cases of omitted optional operand. */
6176 if (i > 0 && ! skip_past_char (&str, ','))
6177 {
6178 set_syntax_error (_("comma expected between operands"));
6179 goto failure;
6180 }
6181 else
6182 comma_skipped_p = 1;
6183
6184 switch (operands[i])
6185 {
6186 case AARCH64_OPND_Rd:
6187 case AARCH64_OPND_Rn:
6188 case AARCH64_OPND_Rm:
6189 case AARCH64_OPND_Rt:
6190 case AARCH64_OPND_Rt2:
6191 case AARCH64_OPND_Rs:
6192 case AARCH64_OPND_Ra:
6193 case AARCH64_OPND_Rt_LS64:
6194 case AARCH64_OPND_Rt_SYS:
6195 case AARCH64_OPND_PAIRREG:
6196 case AARCH64_OPND_SVE_Rm:
6197 po_int_reg_or_fail (REG_TYPE_R_Z);
6198
6199 /* In LS64 load/store instructions Rt register number must be even
6200 and <=22. */
6201 if (operands[i] == AARCH64_OPND_Rt_LS64)
6202 {
6203 /* We've already checked if this is valid register.
6204 This will check if register number (Rt) is not undefined for LS64
6205 instructions:
6206 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6207 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6208 {
6209 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6210 goto failure;
6211 }
6212 }
6213 break;
6214
6215 case AARCH64_OPND_Rd_SP:
6216 case AARCH64_OPND_Rn_SP:
6217 case AARCH64_OPND_Rt_SP:
6218 case AARCH64_OPND_SVE_Rn_SP:
6219 case AARCH64_OPND_Rm_SP:
6220 po_int_reg_or_fail (REG_TYPE_R_SP);
6221 break;
6222
6223 case AARCH64_OPND_Rm_EXT:
6224 case AARCH64_OPND_Rm_SFT:
6225 po_misc_or_fail (parse_shifter_operand
6226 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6227 ? SHIFTED_ARITH_IMM
6228 : SHIFTED_LOGIC_IMM)));
6229 if (!info->shifter.operator_present)
6230 {
6231 /* Default to LSL if not present. Libopcodes prefers shifter
6232 kind to be explicit. */
6233 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6234 info->shifter.kind = AARCH64_MOD_LSL;
6235 /* For Rm_EXT, libopcodes will carry out further check on whether
6236 or not stack pointer is used in the instruction (Recall that
6237 "the extend operator is not optional unless at least one of
6238 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6239 }
6240 break;
6241
6242 case AARCH64_OPND_Fd:
6243 case AARCH64_OPND_Fn:
6244 case AARCH64_OPND_Fm:
6245 case AARCH64_OPND_Fa:
6246 case AARCH64_OPND_Ft:
6247 case AARCH64_OPND_Ft2:
6248 case AARCH64_OPND_Sd:
6249 case AARCH64_OPND_Sn:
6250 case AARCH64_OPND_Sm:
6251 case AARCH64_OPND_SVE_VZn:
6252 case AARCH64_OPND_SVE_Vd:
6253 case AARCH64_OPND_SVE_Vm:
6254 case AARCH64_OPND_SVE_Vn:
6255 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6256 if (val == PARSE_FAIL)
6257 {
6258 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6259 goto failure;
6260 }
6261 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6262
6263 info->reg.regno = val;
6264 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6265 break;
6266
6267 case AARCH64_OPND_SVE_Pd:
6268 case AARCH64_OPND_SVE_Pg3:
6269 case AARCH64_OPND_SVE_Pg4_5:
6270 case AARCH64_OPND_SVE_Pg4_10:
6271 case AARCH64_OPND_SVE_Pg4_16:
6272 case AARCH64_OPND_SVE_Pm:
6273 case AARCH64_OPND_SVE_Pn:
6274 case AARCH64_OPND_SVE_Pt:
6275 case AARCH64_OPND_SME_Pm:
6276 reg_type = REG_TYPE_PN;
6277 goto vector_reg;
6278
6279 case AARCH64_OPND_SVE_Za_5:
6280 case AARCH64_OPND_SVE_Za_16:
6281 case AARCH64_OPND_SVE_Zd:
6282 case AARCH64_OPND_SVE_Zm_5:
6283 case AARCH64_OPND_SVE_Zm_16:
6284 case AARCH64_OPND_SVE_Zn:
6285 case AARCH64_OPND_SVE_Zt:
6286 reg_type = REG_TYPE_ZN;
6287 goto vector_reg;
6288
6289 case AARCH64_OPND_Va:
6290 case AARCH64_OPND_Vd:
6291 case AARCH64_OPND_Vn:
6292 case AARCH64_OPND_Vm:
6293 reg_type = REG_TYPE_VN;
6294 vector_reg:
6295 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6296 if (val == PARSE_FAIL)
6297 {
6298 first_error (_(get_reg_expected_msg (reg_type)));
6299 goto failure;
6300 }
6301 if (vectype.defined & NTA_HASINDEX)
6302 goto failure;
6303
6304 info->reg.regno = val;
6305 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6306 && vectype.type == NT_invtype)
6307 /* Unqualified Pn and Zn registers are allowed in certain
6308 contexts. Rely on F_STRICT qualifier checking to catch
6309 invalid uses. */
6310 info->qualifier = AARCH64_OPND_QLF_NIL;
6311 else
6312 {
6313 info->qualifier = vectype_to_qualifier (&vectype);
6314 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6315 goto failure;
6316 }
6317 break;
6318
6319 case AARCH64_OPND_VdD1:
6320 case AARCH64_OPND_VnD1:
6321 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6322 if (val == PARSE_FAIL)
6323 {
6324 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6325 goto failure;
6326 }
6327 if (vectype.type != NT_d || vectype.index != 1)
6328 {
6329 set_fatal_syntax_error
6330 (_("the top half of a 128-bit FP/SIMD register is expected"));
6331 goto failure;
6332 }
6333 info->reg.regno = val;
6334 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6335 here; it is correct for the purpose of encoding/decoding since
6336 only the register number is explicitly encoded in the related
6337 instructions, although this appears a bit hacky. */
6338 info->qualifier = AARCH64_OPND_QLF_S_D;
6339 break;
6340
6341 case AARCH64_OPND_SVE_Zm3_INDEX:
6342 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6343 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6344 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6345 case AARCH64_OPND_SVE_Zm4_INDEX:
6346 case AARCH64_OPND_SVE_Zn_INDEX:
6347 reg_type = REG_TYPE_ZN;
6348 goto vector_reg_index;
6349
6350 case AARCH64_OPND_Ed:
6351 case AARCH64_OPND_En:
6352 case AARCH64_OPND_Em:
6353 case AARCH64_OPND_Em16:
6354 case AARCH64_OPND_SM3_IMM2:
6355 reg_type = REG_TYPE_VN;
6356 vector_reg_index:
6357 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6358 if (val == PARSE_FAIL)
6359 {
6360 first_error (_(get_reg_expected_msg (reg_type)));
6361 goto failure;
6362 }
6363 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6364 goto failure;
6365
6366 info->reglane.regno = val;
6367 info->reglane.index = vectype.index;
6368 info->qualifier = vectype_to_qualifier (&vectype);
6369 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6370 goto failure;
6371 break;
6372
6373 case AARCH64_OPND_SVE_ZnxN:
6374 case AARCH64_OPND_SVE_ZtxN:
6375 reg_type = REG_TYPE_ZN;
6376 goto vector_reg_list;
6377
6378 case AARCH64_OPND_LVn:
6379 case AARCH64_OPND_LVt:
6380 case AARCH64_OPND_LVt_AL:
6381 case AARCH64_OPND_LEt:
6382 reg_type = REG_TYPE_VN;
6383 vector_reg_list:
6384 if (reg_type == REG_TYPE_ZN
6385 && get_opcode_dependent_value (opcode) == 1
6386 && *str != '{')
6387 {
6388 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6389 if (val == PARSE_FAIL)
6390 {
6391 first_error (_(get_reg_expected_msg (reg_type)));
6392 goto failure;
6393 }
6394 info->reglist.first_regno = val;
6395 info->reglist.num_regs = 1;
6396 }
6397 else
6398 {
6399 val = parse_vector_reg_list (&str, reg_type, &vectype);
6400 if (val == PARSE_FAIL)
6401 goto failure;
6402
6403 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6404 {
6405 set_fatal_syntax_error (_("invalid register list"));
6406 goto failure;
6407 }
6408
6409 if (vectype.width != 0 && *str != ',')
6410 {
6411 set_fatal_syntax_error
6412 (_("expected element type rather than vector type"));
6413 goto failure;
6414 }
6415
6416 info->reglist.first_regno = (val >> 2) & 0x1f;
6417 info->reglist.num_regs = (val & 0x3) + 1;
6418 }
6419 if (operands[i] == AARCH64_OPND_LEt)
6420 {
6421 if (!(vectype.defined & NTA_HASINDEX))
6422 goto failure;
6423 info->reglist.has_index = 1;
6424 info->reglist.index = vectype.index;
6425 }
6426 else
6427 {
6428 if (vectype.defined & NTA_HASINDEX)
6429 goto failure;
6430 if (!(vectype.defined & NTA_HASTYPE))
6431 {
6432 if (reg_type == REG_TYPE_ZN)
6433 set_fatal_syntax_error (_("missing type suffix"));
6434 goto failure;
6435 }
6436 }
6437 info->qualifier = vectype_to_qualifier (&vectype);
6438 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6439 goto failure;
6440 break;
6441
6442 case AARCH64_OPND_CRn:
6443 case AARCH64_OPND_CRm:
6444 {
6445 char prefix = *(str++);
6446 if (prefix != 'c' && prefix != 'C')
6447 goto failure;
6448
6449 po_imm_nc_or_fail ();
6450 if (val > 15)
6451 {
6452 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6453 goto failure;
6454 }
6455 info->qualifier = AARCH64_OPND_QLF_CR;
6456 info->imm.value = val;
6457 break;
6458 }
6459
6460 case AARCH64_OPND_SHLL_IMM:
6461 case AARCH64_OPND_IMM_VLSR:
6462 po_imm_or_fail (1, 64);
6463 info->imm.value = val;
6464 break;
6465
6466 case AARCH64_OPND_CCMP_IMM:
6467 case AARCH64_OPND_SIMM5:
6468 case AARCH64_OPND_FBITS:
6469 case AARCH64_OPND_TME_UIMM16:
6470 case AARCH64_OPND_UIMM4:
6471 case AARCH64_OPND_UIMM4_ADDG:
6472 case AARCH64_OPND_UIMM10:
6473 case AARCH64_OPND_UIMM3_OP1:
6474 case AARCH64_OPND_UIMM3_OP2:
6475 case AARCH64_OPND_IMM_VLSL:
6476 case AARCH64_OPND_IMM:
6477 case AARCH64_OPND_IMM_2:
6478 case AARCH64_OPND_WIDTH:
6479 case AARCH64_OPND_SVE_INV_LIMM:
6480 case AARCH64_OPND_SVE_LIMM:
6481 case AARCH64_OPND_SVE_LIMM_MOV:
6482 case AARCH64_OPND_SVE_SHLIMM_PRED:
6483 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6484 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6485 case AARCH64_OPND_SVE_SHRIMM_PRED:
6486 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6487 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6488 case AARCH64_OPND_SVE_SIMM5:
6489 case AARCH64_OPND_SVE_SIMM5B:
6490 case AARCH64_OPND_SVE_SIMM6:
6491 case AARCH64_OPND_SVE_SIMM8:
6492 case AARCH64_OPND_SVE_UIMM3:
6493 case AARCH64_OPND_SVE_UIMM7:
6494 case AARCH64_OPND_SVE_UIMM8:
6495 case AARCH64_OPND_SVE_UIMM8_53:
6496 case AARCH64_OPND_IMM_ROT1:
6497 case AARCH64_OPND_IMM_ROT2:
6498 case AARCH64_OPND_IMM_ROT3:
6499 case AARCH64_OPND_SVE_IMM_ROT1:
6500 case AARCH64_OPND_SVE_IMM_ROT2:
6501 case AARCH64_OPND_SVE_IMM_ROT3:
6502 po_imm_nc_or_fail ();
6503 info->imm.value = val;
6504 break;
6505
6506 case AARCH64_OPND_SVE_AIMM:
6507 case AARCH64_OPND_SVE_ASIMM:
6508 po_imm_nc_or_fail ();
6509 info->imm.value = val;
6510 skip_whitespace (str);
6511 if (skip_past_comma (&str))
6512 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6513 else
6514 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6515 break;
6516
6517 case AARCH64_OPND_SVE_PATTERN:
6518 po_enum_or_fail (aarch64_sve_pattern_array);
6519 info->imm.value = val;
6520 break;
6521
6522 case AARCH64_OPND_SVE_PATTERN_SCALED:
6523 po_enum_or_fail (aarch64_sve_pattern_array);
6524 info->imm.value = val;
6525 if (skip_past_comma (&str)
6526 && !parse_shift (&str, info, SHIFTED_MUL))
6527 goto failure;
6528 if (!info->shifter.operator_present)
6529 {
6530 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6531 info->shifter.kind = AARCH64_MOD_MUL;
6532 info->shifter.amount = 1;
6533 }
6534 break;
6535
6536 case AARCH64_OPND_SVE_PRFOP:
6537 po_enum_or_fail (aarch64_sve_prfop_array);
6538 info->imm.value = val;
6539 break;
6540
6541 case AARCH64_OPND_UIMM7:
6542 po_imm_or_fail (0, 127);
6543 info->imm.value = val;
6544 break;
6545
6546 case AARCH64_OPND_IDX:
6547 case AARCH64_OPND_MASK:
6548 case AARCH64_OPND_BIT_NUM:
6549 case AARCH64_OPND_IMMR:
6550 case AARCH64_OPND_IMMS:
6551 po_imm_or_fail (0, 63);
6552 info->imm.value = val;
6553 break;
6554
6555 case AARCH64_OPND_IMM0:
6556 po_imm_nc_or_fail ();
6557 if (val != 0)
6558 {
6559 set_fatal_syntax_error (_("immediate zero expected"));
6560 goto failure;
6561 }
6562 info->imm.value = 0;
6563 break;
6564
6565 case AARCH64_OPND_FPIMM0:
6566 {
6567 int qfloat;
6568 bool res1 = false, res2 = false;
6569 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6570 it is probably not worth the effort to support it. */
6571 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6572 imm_reg_type))
6573 && (error_p ()
6574 || !(res2 = parse_constant_immediate (&str, &val,
6575 imm_reg_type))))
6576 goto failure;
6577 if ((res1 && qfloat == 0) || (res2 && val == 0))
6578 {
6579 info->imm.value = 0;
6580 info->imm.is_fp = 1;
6581 break;
6582 }
6583 set_fatal_syntax_error (_("immediate zero expected"));
6584 goto failure;
6585 }
6586
6587 case AARCH64_OPND_IMM_MOV:
6588 {
6589 char *saved = str;
6590 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6591 reg_name_p (str, REG_TYPE_VN))
6592 goto failure;
6593 str = saved;
6594 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6595 GE_OPT_PREFIX, REJECT_ABSENT,
6596 NORMAL_RESOLUTION));
6597 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6598 later. fix_mov_imm_insn will try to determine a machine
6599 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6600 message if the immediate cannot be moved by a single
6601 instruction. */
6602 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6603 inst.base.operands[i].skip = 1;
6604 }
6605 break;
6606
6607 case AARCH64_OPND_SIMD_IMM:
6608 case AARCH64_OPND_SIMD_IMM_SFT:
6609 if (! parse_big_immediate (&str, &val, imm_reg_type))
6610 goto failure;
6611 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6612 /* addr_off_p */ 0,
6613 /* need_libopcodes_p */ 1,
6614 /* skip_p */ 1);
6615 /* Parse shift.
6616 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6617 shift, we don't check it here; we leave the checking to
6618 the libopcodes (operand_general_constraint_met_p). By
6619 doing this, we achieve better diagnostics. */
6620 if (skip_past_comma (&str)
6621 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6622 goto failure;
6623 if (!info->shifter.operator_present
6624 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6625 {
6626 /* Default to LSL if not present. Libopcodes prefers shifter
6627 kind to be explicit. */
6628 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6629 info->shifter.kind = AARCH64_MOD_LSL;
6630 }
6631 break;
6632
6633 case AARCH64_OPND_FPIMM:
6634 case AARCH64_OPND_SIMD_FPIMM:
6635 case AARCH64_OPND_SVE_FPIMM8:
6636 {
6637 int qfloat;
6638 bool dp_p;
6639
6640 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6641 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6642 || !aarch64_imm_float_p (qfloat))
6643 {
6644 if (!error_p ())
6645 set_fatal_syntax_error (_("invalid floating-point"
6646 " constant"));
6647 goto failure;
6648 }
6649 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6650 inst.base.operands[i].imm.is_fp = 1;
6651 }
6652 break;
6653
6654 case AARCH64_OPND_SVE_I1_HALF_ONE:
6655 case AARCH64_OPND_SVE_I1_HALF_TWO:
6656 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6657 {
6658 int qfloat;
6659 bool dp_p;
6660
6661 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6662 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6663 {
6664 if (!error_p ())
6665 set_fatal_syntax_error (_("invalid floating-point"
6666 " constant"));
6667 goto failure;
6668 }
6669 inst.base.operands[i].imm.value = qfloat;
6670 inst.base.operands[i].imm.is_fp = 1;
6671 }
6672 break;
6673
6674 case AARCH64_OPND_LIMM:
6675 po_misc_or_fail (parse_shifter_operand (&str, info,
6676 SHIFTED_LOGIC_IMM));
6677 if (info->shifter.operator_present)
6678 {
6679 set_fatal_syntax_error
6680 (_("shift not allowed for bitmask immediate"));
6681 goto failure;
6682 }
6683 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6684 /* addr_off_p */ 0,
6685 /* need_libopcodes_p */ 1,
6686 /* skip_p */ 1);
6687 break;
6688
6689 case AARCH64_OPND_AIMM:
6690 if (opcode->op == OP_ADD)
6691 /* ADD may have relocation types. */
6692 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6693 SHIFTED_ARITH_IMM));
6694 else
6695 po_misc_or_fail (parse_shifter_operand (&str, info,
6696 SHIFTED_ARITH_IMM));
6697 switch (inst.reloc.type)
6698 {
6699 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6700 info->shifter.amount = 12;
6701 break;
6702 case BFD_RELOC_UNUSED:
6703 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6704 if (info->shifter.kind != AARCH64_MOD_NONE)
6705 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6706 inst.reloc.pc_rel = 0;
6707 break;
6708 default:
6709 break;
6710 }
6711 info->imm.value = 0;
6712 if (!info->shifter.operator_present)
6713 {
6714 /* Default to LSL if not present. Libopcodes prefers shifter
6715 kind to be explicit. */
6716 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6717 info->shifter.kind = AARCH64_MOD_LSL;
6718 }
6719 break;
6720
6721 case AARCH64_OPND_HALF:
6722 {
6723 /* #<imm16> or relocation. */
6724 int internal_fixup_p;
6725 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6726 if (internal_fixup_p)
6727 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6728 skip_whitespace (str);
6729 if (skip_past_comma (&str))
6730 {
6731 /* {, LSL #<shift>} */
6732 if (! aarch64_gas_internal_fixup_p ())
6733 {
6734 set_fatal_syntax_error (_("can't mix relocation modifier "
6735 "with explicit shift"));
6736 goto failure;
6737 }
6738 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6739 }
6740 else
6741 inst.base.operands[i].shifter.amount = 0;
6742 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6743 inst.base.operands[i].imm.value = 0;
6744 if (! process_movw_reloc_info ())
6745 goto failure;
6746 }
6747 break;
6748
6749 case AARCH64_OPND_EXCEPTION:
6750 case AARCH64_OPND_UNDEFINED:
6751 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6752 imm_reg_type));
6753 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6754 /* addr_off_p */ 0,
6755 /* need_libopcodes_p */ 0,
6756 /* skip_p */ 1);
6757 break;
6758
6759 case AARCH64_OPND_NZCV:
6760 {
6761 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6762 if (nzcv != NULL)
6763 {
6764 str += 4;
6765 info->imm.value = nzcv->value;
6766 break;
6767 }
6768 po_imm_or_fail (0, 15);
6769 info->imm.value = val;
6770 }
6771 break;
6772
6773 case AARCH64_OPND_COND:
6774 case AARCH64_OPND_COND1:
6775 {
6776 char *start = str;
6777 do
6778 str++;
6779 while (ISALPHA (*str));
6780 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6781 if (info->cond == NULL)
6782 {
6783 set_syntax_error (_("invalid condition"));
6784 goto failure;
6785 }
6786 else if (operands[i] == AARCH64_OPND_COND1
6787 && (info->cond->value & 0xe) == 0xe)
6788 {
6789 /* Do not allow AL or NV. */
6790 set_default_error ();
6791 goto failure;
6792 }
6793 }
6794 break;
6795
6796 case AARCH64_OPND_ADDR_ADRP:
6797 po_misc_or_fail (parse_adrp (&str));
6798 /* Clear the value as operand needs to be relocated. */
6799 info->imm.value = 0;
6800 break;
6801
6802 case AARCH64_OPND_ADDR_PCREL14:
6803 case AARCH64_OPND_ADDR_PCREL19:
6804 case AARCH64_OPND_ADDR_PCREL21:
6805 case AARCH64_OPND_ADDR_PCREL26:
6806 po_misc_or_fail (parse_address (&str, info));
6807 if (!info->addr.pcrel)
6808 {
6809 set_syntax_error (_("invalid pc-relative address"));
6810 goto failure;
6811 }
6812 if (inst.gen_lit_pool
6813 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6814 {
6815 /* Only permit "=value" in the literal load instructions.
6816 The literal will be generated by programmer_friendly_fixup. */
6817 set_syntax_error (_("invalid use of \"=immediate\""));
6818 goto failure;
6819 }
6820 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6821 {
6822 set_syntax_error (_("unrecognized relocation suffix"));
6823 goto failure;
6824 }
6825 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6826 {
6827 info->imm.value = inst.reloc.exp.X_add_number;
6828 inst.reloc.type = BFD_RELOC_UNUSED;
6829 }
6830 else
6831 {
6832 info->imm.value = 0;
6833 if (inst.reloc.type == BFD_RELOC_UNUSED)
6834 switch (opcode->iclass)
6835 {
6836 case compbranch:
6837 case condbranch:
6838 /* e.g. CBZ or B.COND */
6839 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6840 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6841 break;
6842 case testbranch:
6843 /* e.g. TBZ */
6844 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6845 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6846 break;
6847 case branch_imm:
6848 /* e.g. B or BL */
6849 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6850 inst.reloc.type =
6851 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6852 : BFD_RELOC_AARCH64_JUMP26;
6853 break;
6854 case loadlit:
6855 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6856 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6857 break;
6858 case pcreladdr:
6859 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6860 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6861 break;
6862 default:
6863 gas_assert (0);
6864 abort ();
6865 }
6866 inst.reloc.pc_rel = 1;
6867 }
6868 break;
6869
6870 case AARCH64_OPND_ADDR_SIMPLE:
6871 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6872 {
6873 /* [<Xn|SP>{, #<simm>}] */
6874 char *start = str;
6875 /* First use the normal address-parsing routines, to get
6876 the usual syntax errors. */
6877 po_misc_or_fail (parse_address (&str, info));
6878 if (info->addr.pcrel || info->addr.offset.is_reg
6879 || !info->addr.preind || info->addr.postind
6880 || info->addr.writeback)
6881 {
6882 set_syntax_error (_("invalid addressing mode"));
6883 goto failure;
6884 }
6885
6886 /* Then retry, matching the specific syntax of these addresses. */
6887 str = start;
6888 po_char_or_fail ('[');
6889 po_reg_or_fail (REG_TYPE_R64_SP);
6890 /* Accept optional ", #0". */
6891 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6892 && skip_past_char (&str, ','))
6893 {
6894 skip_past_char (&str, '#');
6895 if (! skip_past_char (&str, '0'))
6896 {
6897 set_fatal_syntax_error
6898 (_("the optional immediate offset can only be 0"));
6899 goto failure;
6900 }
6901 }
6902 po_char_or_fail (']');
6903 break;
6904 }
6905
6906 case AARCH64_OPND_ADDR_REGOFF:
6907 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6908 po_misc_or_fail (parse_address (&str, info));
6909 regoff_addr:
6910 if (info->addr.pcrel || !info->addr.offset.is_reg
6911 || !info->addr.preind || info->addr.postind
6912 || info->addr.writeback)
6913 {
6914 set_syntax_error (_("invalid addressing mode"));
6915 goto failure;
6916 }
6917 if (!info->shifter.operator_present)
6918 {
6919 /* Default to LSL if not present. Libopcodes prefers shifter
6920 kind to be explicit. */
6921 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6922 info->shifter.kind = AARCH64_MOD_LSL;
6923 }
6924 /* Qualifier to be deduced by libopcodes. */
6925 break;
6926
6927 case AARCH64_OPND_ADDR_SIMM7:
6928 po_misc_or_fail (parse_address (&str, info));
6929 if (info->addr.pcrel || info->addr.offset.is_reg
6930 || (!info->addr.preind && !info->addr.postind))
6931 {
6932 set_syntax_error (_("invalid addressing mode"));
6933 goto failure;
6934 }
6935 if (inst.reloc.type != BFD_RELOC_UNUSED)
6936 {
6937 set_syntax_error (_("relocation not allowed"));
6938 goto failure;
6939 }
6940 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6941 /* addr_off_p */ 1,
6942 /* need_libopcodes_p */ 1,
6943 /* skip_p */ 0);
6944 break;
6945
6946 case AARCH64_OPND_ADDR_SIMM9:
6947 case AARCH64_OPND_ADDR_SIMM9_2:
6948 case AARCH64_OPND_ADDR_SIMM11:
6949 case AARCH64_OPND_ADDR_SIMM13:
6950 po_misc_or_fail (parse_address (&str, info));
6951 if (info->addr.pcrel || info->addr.offset.is_reg
6952 || (!info->addr.preind && !info->addr.postind)
6953 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6954 && info->addr.writeback))
6955 {
6956 set_syntax_error (_("invalid addressing mode"));
6957 goto failure;
6958 }
6959 if (inst.reloc.type != BFD_RELOC_UNUSED)
6960 {
6961 set_syntax_error (_("relocation not allowed"));
6962 goto failure;
6963 }
6964 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6965 /* addr_off_p */ 1,
6966 /* need_libopcodes_p */ 1,
6967 /* skip_p */ 0);
6968 break;
6969
6970 case AARCH64_OPND_ADDR_SIMM10:
6971 case AARCH64_OPND_ADDR_OFFSET:
6972 po_misc_or_fail (parse_address (&str, info));
6973 if (info->addr.pcrel || info->addr.offset.is_reg
6974 || !info->addr.preind || info->addr.postind)
6975 {
6976 set_syntax_error (_("invalid addressing mode"));
6977 goto failure;
6978 }
6979 if (inst.reloc.type != BFD_RELOC_UNUSED)
6980 {
6981 set_syntax_error (_("relocation not allowed"));
6982 goto failure;
6983 }
6984 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6985 /* addr_off_p */ 1,
6986 /* need_libopcodes_p */ 1,
6987 /* skip_p */ 0);
6988 break;
6989
6990 case AARCH64_OPND_ADDR_UIMM12:
6991 po_misc_or_fail (parse_address (&str, info));
6992 if (info->addr.pcrel || info->addr.offset.is_reg
6993 || !info->addr.preind || info->addr.writeback)
6994 {
6995 set_syntax_error (_("invalid addressing mode"));
6996 goto failure;
6997 }
6998 if (inst.reloc.type == BFD_RELOC_UNUSED)
6999 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7000 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7001 || (inst.reloc.type
7002 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7003 || (inst.reloc.type
7004 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7005 || (inst.reloc.type
7006 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7007 || (inst.reloc.type
7008 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7009 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7010 /* Leave qualifier to be determined by libopcodes. */
7011 break;
7012
7013 case AARCH64_OPND_SIMD_ADDR_POST:
7014 /* [<Xn|SP>], <Xm|#<amount>> */
7015 po_misc_or_fail (parse_address (&str, info));
7016 if (!info->addr.postind || !info->addr.writeback)
7017 {
7018 set_syntax_error (_("invalid addressing mode"));
7019 goto failure;
7020 }
7021 if (!info->addr.offset.is_reg)
7022 {
7023 if (inst.reloc.exp.X_op == O_constant)
7024 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7025 else
7026 {
7027 set_fatal_syntax_error
7028 (_("writeback value must be an immediate constant"));
7029 goto failure;
7030 }
7031 }
7032 /* No qualifier. */
7033 break;
7034
7035 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7036 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7037 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7038 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7039 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7040 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7041 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7042 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7043 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7044 case AARCH64_OPND_SVE_ADDR_RI_U6:
7045 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7046 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7047 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7048 /* [X<n>{, #imm, MUL VL}]
7049 [X<n>{, #imm}]
7050 but recognizing SVE registers. */
7051 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7052 &offset_qualifier));
7053 if (base_qualifier != AARCH64_OPND_QLF_X)
7054 {
7055 set_syntax_error (_("invalid addressing mode"));
7056 goto failure;
7057 }
7058 sve_regimm:
7059 if (info->addr.pcrel || info->addr.offset.is_reg
7060 || !info->addr.preind || info->addr.writeback)
7061 {
7062 set_syntax_error (_("invalid addressing mode"));
7063 goto failure;
7064 }
7065 if (inst.reloc.type != BFD_RELOC_UNUSED
7066 || inst.reloc.exp.X_op != O_constant)
7067 {
7068 /* Make sure this has priority over
7069 "invalid addressing mode". */
7070 set_fatal_syntax_error (_("constant offset required"));
7071 goto failure;
7072 }
7073 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7074 break;
7075
7076 case AARCH64_OPND_SVE_ADDR_R:
7077 /* [<Xn|SP>{, <R><m>}]
7078 but recognizing SVE registers. */
7079 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7080 &offset_qualifier));
7081 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7082 {
7083 offset_qualifier = AARCH64_OPND_QLF_X;
7084 info->addr.offset.is_reg = 1;
7085 info->addr.offset.regno = 31;
7086 }
7087 else if (base_qualifier != AARCH64_OPND_QLF_X
7088 || offset_qualifier != AARCH64_OPND_QLF_X)
7089 {
7090 set_syntax_error (_("invalid addressing mode"));
7091 goto failure;
7092 }
7093 goto regoff_addr;
7094
7095 case AARCH64_OPND_SVE_ADDR_RR:
7096 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7097 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7098 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7099 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7100 case AARCH64_OPND_SVE_ADDR_RX:
7101 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7102 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7103 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7104 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7105 but recognizing SVE registers. */
7106 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7107 &offset_qualifier));
7108 if (base_qualifier != AARCH64_OPND_QLF_X
7109 || offset_qualifier != AARCH64_OPND_QLF_X)
7110 {
7111 set_syntax_error (_("invalid addressing mode"));
7112 goto failure;
7113 }
7114 goto regoff_addr;
7115
7116 case AARCH64_OPND_SVE_ADDR_RZ:
7117 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7118 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7119 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7120 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7121 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7122 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7123 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7124 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7125 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7126 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7127 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7128 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7129 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7130 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7131 &offset_qualifier));
7132 if (base_qualifier != AARCH64_OPND_QLF_X
7133 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7134 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7135 {
7136 set_syntax_error (_("invalid addressing mode"));
7137 goto failure;
7138 }
7139 info->qualifier = offset_qualifier;
7140 goto regoff_addr;
7141
7142 case AARCH64_OPND_SVE_ADDR_ZX:
7143 /* [Zn.<T>{, <Xm>}]. */
7144 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7145 &offset_qualifier));
7146 /* Things to check:
7147 base_qualifier either S_S or S_D
7148 offset_qualifier must be X
7149 */
7150 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7151 && base_qualifier != AARCH64_OPND_QLF_S_D)
7152 || offset_qualifier != AARCH64_OPND_QLF_X)
7153 {
7154 set_syntax_error (_("invalid addressing mode"));
7155 goto failure;
7156 }
7157 info->qualifier = base_qualifier;
7158 if (!info->addr.offset.is_reg || info->addr.pcrel
7159 || !info->addr.preind || info->addr.writeback
7160 || info->shifter.operator_present != 0)
7161 {
7162 set_syntax_error (_("invalid addressing mode"));
7163 goto failure;
7164 }
7165 info->shifter.kind = AARCH64_MOD_LSL;
7166 break;
7167
7168
7169 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7170 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7171 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7172 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7173 /* [Z<n>.<T>{, #imm}] */
7174 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7175 &offset_qualifier));
7176 if (base_qualifier != AARCH64_OPND_QLF_S_S
7177 && base_qualifier != AARCH64_OPND_QLF_S_D)
7178 {
7179 set_syntax_error (_("invalid addressing mode"));
7180 goto failure;
7181 }
7182 info->qualifier = base_qualifier;
7183 goto sve_regimm;
7184
7185 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7186 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7187 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7188 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7189 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7190
7191 We don't reject:
7192
7193 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7194
7195 here since we get better error messages by leaving it to
7196 the qualifier checking routines. */
7197 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7198 &offset_qualifier));
7199 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7200 && base_qualifier != AARCH64_OPND_QLF_S_D)
7201 || offset_qualifier != base_qualifier)
7202 {
7203 set_syntax_error (_("invalid addressing mode"));
7204 goto failure;
7205 }
7206 info->qualifier = base_qualifier;
7207 goto regoff_addr;
7208
7209 case AARCH64_OPND_SYSREG:
7210 {
7211 uint32_t sysreg_flags;
7212 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7213 &sysreg_flags)) == PARSE_FAIL)
7214 {
7215 set_syntax_error (_("unknown or missing system register name"));
7216 goto failure;
7217 }
7218 inst.base.operands[i].sysreg.value = val;
7219 inst.base.operands[i].sysreg.flags = sysreg_flags;
7220 break;
7221 }
7222
7223 case AARCH64_OPND_PSTATEFIELD:
7224 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
7225 == PARSE_FAIL)
7226 {
7227 set_syntax_error (_("unknown or missing PSTATE field name"));
7228 goto failure;
7229 }
7230 inst.base.operands[i].pstatefield = val;
7231 break;
7232
7233 case AARCH64_OPND_SYSREG_IC:
7234 inst.base.operands[i].sysins_op =
7235 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7236 goto sys_reg_ins;
7237
7238 case AARCH64_OPND_SYSREG_DC:
7239 inst.base.operands[i].sysins_op =
7240 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7241 goto sys_reg_ins;
7242
7243 case AARCH64_OPND_SYSREG_AT:
7244 inst.base.operands[i].sysins_op =
7245 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7246 goto sys_reg_ins;
7247
7248 case AARCH64_OPND_SYSREG_SR:
7249 inst.base.operands[i].sysins_op =
7250 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7251 goto sys_reg_ins;
7252
7253 case AARCH64_OPND_SYSREG_TLBI:
7254 inst.base.operands[i].sysins_op =
7255 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7256 sys_reg_ins:
7257 if (inst.base.operands[i].sysins_op == NULL)
7258 {
7259 set_fatal_syntax_error ( _("unknown or missing operation name"));
7260 goto failure;
7261 }
7262 break;
7263
7264 case AARCH64_OPND_BARRIER:
7265 case AARCH64_OPND_BARRIER_ISB:
7266 val = parse_barrier (&str);
7267 if (val != PARSE_FAIL
7268 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7269 {
7270 /* ISB only accepts options name 'sy'. */
7271 set_syntax_error
7272 (_("the specified option is not accepted in ISB"));
7273 /* Turn off backtrack as this optional operand is present. */
7274 backtrack_pos = 0;
7275 goto failure;
7276 }
7277 if (val != PARSE_FAIL
7278 && operands[i] == AARCH64_OPND_BARRIER)
7279 {
7280 /* Regular barriers accept options CRm (C0-C15).
7281 DSB nXS barrier variant accepts values > 15. */
7282 if (val < 0 || val > 15)
7283 {
7284 set_syntax_error (_("the specified option is not accepted in DSB"));
7285 goto failure;
7286 }
7287 }
7288 /* This is an extension to accept a 0..15 immediate. */
7289 if (val == PARSE_FAIL)
7290 po_imm_or_fail (0, 15);
7291 info->barrier = aarch64_barrier_options + val;
7292 break;
7293
7294 case AARCH64_OPND_BARRIER_DSB_NXS:
7295 val = parse_barrier (&str);
7296 if (val != PARSE_FAIL)
7297 {
7298 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7299 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7300 {
7301 set_syntax_error (_("the specified option is not accepted in DSB"));
7302 /* Turn off backtrack as this optional operand is present. */
7303 backtrack_pos = 0;
7304 goto failure;
7305 }
7306 }
7307 else
7308 {
7309 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7310 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7311 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7312 goto failure;
7313 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7314 {
7315 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7316 goto failure;
7317 }
7318 }
7319 /* Option index is encoded as 2-bit value in val<3:2>. */
7320 val = (val >> 2) - 4;
7321 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7322 break;
7323
7324 case AARCH64_OPND_PRFOP:
7325 val = parse_pldop (&str);
7326 /* This is an extension to accept a 0..31 immediate. */
7327 if (val == PARSE_FAIL)
7328 po_imm_or_fail (0, 31);
7329 inst.base.operands[i].prfop = aarch64_prfops + val;
7330 break;
7331
7332 case AARCH64_OPND_BARRIER_PSB:
7333 val = parse_barrier_psb (&str, &(info->hint_option));
7334 if (val == PARSE_FAIL)
7335 goto failure;
7336 break;
7337
7338 case AARCH64_OPND_BTI_TARGET:
7339 val = parse_bti_operand (&str, &(info->hint_option));
7340 if (val == PARSE_FAIL)
7341 goto failure;
7342 break;
7343
7344 case AARCH64_OPND_SME_ZAda_2b:
7345 case AARCH64_OPND_SME_ZAda_3b:
7346 val = parse_sme_zada_operand (&str, &qualifier);
7347 if (val == PARSE_FAIL)
7348 goto failure;
7349 info->reg.regno = val;
7350 info->qualifier = qualifier;
7351 break;
7352
7353 case AARCH64_OPND_SME_ZA_HV_idx_src:
7354 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7355 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7356 {
7357 enum sme_hv_slice slice_indicator;
7358 int vector_select_register;
7359 int imm;
7360
7361 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7362 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7363 &slice_indicator,
7364 &vector_select_register,
7365 &imm,
7366 &qualifier);
7367 else
7368 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7369 &vector_select_register,
7370 &imm,
7371 &qualifier);
7372 if (val == PARSE_FAIL)
7373 goto failure;
7374 info->za_tile_vector.regno = val;
7375 info->za_tile_vector.index.regno = vector_select_register;
7376 info->za_tile_vector.index.imm = imm;
7377 info->za_tile_vector.v = slice_indicator;
7378 info->qualifier = qualifier;
7379 break;
7380 }
7381
7382 case AARCH64_OPND_SME_list_of_64bit_tiles:
7383 val = parse_sme_list_of_64bit_tiles (&str);
7384 if (val == PARSE_FAIL)
7385 goto failure;
7386 info->imm.value = val;
7387 break;
7388
7389 case AARCH64_OPND_SME_ZA_array:
7390 {
7391 int imm;
7392 val = parse_sme_za_array (&str, &imm);
7393 if (val == PARSE_FAIL)
7394 goto failure;
7395 info->za_tile_vector.index.regno = val;
7396 info->za_tile_vector.index.imm = imm;
7397 break;
7398 }
7399
7400 default:
7401 as_fatal (_("unhandled operand code %d"), operands[i]);
7402 }
7403
7404 /* If we get here, this operand was successfully parsed. */
7405 inst.base.operands[i].present = 1;
7406 continue;
7407
7408 failure:
7409 /* The parse routine should already have set the error, but in case
7410 not, set a default one here. */
7411 if (! error_p ())
7412 set_default_error ();
7413
7414 if (! backtrack_pos)
7415 goto parse_operands_return;
7416
7417 {
7418 /* We reach here because this operand is marked as optional, and
7419 either no operand was supplied or the operand was supplied but it
7420 was syntactically incorrect. In the latter case we report an
7421 error. In the former case we perform a few more checks before
7422 dropping through to the code to insert the default operand. */
7423
7424 char *tmp = backtrack_pos;
7425 char endchar = END_OF_INSN;
7426
7427 if (i != (aarch64_num_of_operands (opcode) - 1))
7428 endchar = ',';
7429 skip_past_char (&tmp, ',');
7430
7431 if (*tmp != endchar)
7432 /* The user has supplied an operand in the wrong format. */
7433 goto parse_operands_return;
7434
7435 /* Make sure there is not a comma before the optional operand.
7436 For example the fifth operand of 'sys' is optional:
7437
7438 sys #0,c0,c0,#0, <--- wrong
7439 sys #0,c0,c0,#0 <--- correct. */
7440 if (comma_skipped_p && i && endchar == END_OF_INSN)
7441 {
7442 set_fatal_syntax_error
7443 (_("unexpected comma before the omitted optional operand"));
7444 goto parse_operands_return;
7445 }
7446 }
7447
7448 /* Reaching here means we are dealing with an optional operand that is
7449 omitted from the assembly line. */
7450 gas_assert (optional_operand_p (opcode, i));
7451 info->present = 0;
7452 process_omitted_operand (operands[i], opcode, i, info);
7453
7454 /* Try again, skipping the optional operand at backtrack_pos. */
7455 str = backtrack_pos;
7456 backtrack_pos = 0;
7457
7458 /* Clear any error record after the omitted optional operand has been
7459 successfully handled. */
7460 clear_error ();
7461 }
7462
7463 /* Check if we have parsed all the operands. */
7464 if (*str != '\0' && ! error_p ())
7465 {
7466 /* Set I to the index of the last present operand; this is
7467 for the purpose of diagnostics. */
7468 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7469 ;
7470 set_fatal_syntax_error
7471 (_("unexpected characters following instruction"));
7472 }
7473
7474 parse_operands_return:
7475
7476 if (error_p ())
7477 {
7478 DEBUG_TRACE ("parsing FAIL: %s - %s",
7479 operand_mismatch_kind_names[get_error_kind ()],
7480 get_error_message ());
7481 /* Record the operand error properly; this is useful when there
7482 are multiple instruction templates for a mnemonic name, so that
7483 later on, we can select the error that most closely describes
7484 the problem. */
7485 record_operand_error (opcode, i, get_error_kind (),
7486 get_error_message ());
7487 return false;
7488 }
7489 else
7490 {
7491 DEBUG_TRACE ("parsing SUCCESS");
7492 return true;
7493 }
7494 }
7495
7496 /* It does some fix-up to provide some programmer friendly feature while
7497 keeping the libopcodes happy, i.e. libopcodes only accepts
7498 the preferred architectural syntax.
7499 Return FALSE if there is any failure; otherwise return TRUE. */
7500
7501 static bool
7502 programmer_friendly_fixup (aarch64_instruction *instr)
7503 {
7504 aarch64_inst *base = &instr->base;
7505 const aarch64_opcode *opcode = base->opcode;
7506 enum aarch64_op op = opcode->op;
7507 aarch64_opnd_info *operands = base->operands;
7508
7509 DEBUG_TRACE ("enter");
7510
7511 switch (opcode->iclass)
7512 {
7513 case testbranch:
7514 /* TBNZ Xn|Wn, #uimm6, label
7515 Test and Branch Not Zero: conditionally jumps to label if bit number
7516 uimm6 in register Xn is not zero. The bit number implies the width of
7517 the register, which may be written and should be disassembled as Wn if
7518 uimm is less than 32. */
7519 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7520 {
7521 if (operands[1].imm.value >= 32)
7522 {
7523 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7524 0, 31);
7525 return false;
7526 }
7527 operands[0].qualifier = AARCH64_OPND_QLF_X;
7528 }
7529 break;
7530 case loadlit:
7531 /* LDR Wt, label | =value
7532 As a convenience assemblers will typically permit the notation
7533 "=value" in conjunction with the pc-relative literal load instructions
7534 to automatically place an immediate value or symbolic address in a
7535 nearby literal pool and generate a hidden label which references it.
7536 ISREG has been set to 0 in the case of =value. */
7537 if (instr->gen_lit_pool
7538 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7539 {
7540 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7541 if (op == OP_LDRSW_LIT)
7542 size = 4;
7543 if (instr->reloc.exp.X_op != O_constant
7544 && instr->reloc.exp.X_op != O_big
7545 && instr->reloc.exp.X_op != O_symbol)
7546 {
7547 record_operand_error (opcode, 1,
7548 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7549 _("constant expression expected"));
7550 return false;
7551 }
7552 if (! add_to_lit_pool (&instr->reloc.exp, size))
7553 {
7554 record_operand_error (opcode, 1,
7555 AARCH64_OPDE_OTHER_ERROR,
7556 _("literal pool insertion failed"));
7557 return false;
7558 }
7559 }
7560 break;
7561 case log_shift:
7562 case bitfield:
7563 /* UXT[BHW] Wd, Wn
7564 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7565 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7566 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7567 A programmer-friendly assembler should accept a destination Xd in
7568 place of Wd, however that is not the preferred form for disassembly.
7569 */
7570 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7571 && operands[1].qualifier == AARCH64_OPND_QLF_W
7572 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7573 operands[0].qualifier = AARCH64_OPND_QLF_W;
7574 break;
7575
7576 case addsub_ext:
7577 {
7578 /* In the 64-bit form, the final register operand is written as Wm
7579 for all but the (possibly omitted) UXTX/LSL and SXTX
7580 operators.
7581 As a programmer-friendly assembler, we accept e.g.
7582 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7583 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7584 int idx = aarch64_operand_index (opcode->operands,
7585 AARCH64_OPND_Rm_EXT);
7586 gas_assert (idx == 1 || idx == 2);
7587 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7588 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7589 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7590 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7591 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7592 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7593 }
7594 break;
7595
7596 default:
7597 break;
7598 }
7599
7600 DEBUG_TRACE ("exit with SUCCESS");
7601 return true;
7602 }
7603
/* Check for loads and stores that will cause unpredictable behavior.
   INSTR is the fully-parsed instruction; STR is the original assembly
   text, quoted verbatim in the diagnostics.  Only warnings are issued
   here -- the instruction is still assembled.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 For these classes operand 0 is the transfer register and
	 operand 1 the address; a base of SP is fine.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Pair instructions have two transfer registers (operands 0 and 1)
	 and the address in operand 2.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes loads from stores (as also
	 used by the ldstexcl checks below).  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* For the exclusive-access class, bit 22 selects loads and bit 21
	 selects the pair variants, per the checks below.  Operand layout
	 differs between loads (Rt [, Rt2], address) and stores
	 (Rs, Rt [, Rt2], address).  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7699
7700 static void
7701 force_automatic_sequence_close (void)
7702 {
7703 if (now_instr_sequence.instr)
7704 {
7705 as_warn (_("previous `%s' sequence has not been closed"),
7706 now_instr_sequence.instr->opcode->name);
7707 init_insn_sequence (NULL, &now_instr_sequence);
7708 }
7709 }
7710
7711 /* A wrapper function to interface with libopcodes on encoding and
7712 record the error message if there is any.
7713
7714 Return TRUE on success; otherwise return FALSE. */
7715
7716 static bool
7717 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7718 aarch64_insn *code)
7719 {
7720 aarch64_operand_error error_info;
7721 memset (&error_info, '\0', sizeof (error_info));
7722 error_info.kind = AARCH64_OPDE_NIL;
7723 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7724 && !error_info.non_fatal)
7725 return true;
7726
7727 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7728 record_operand_error_info (opcode, &error_info);
7729 return error_info.non_fatal;
7730 }
7731
#ifdef DEBUG_AARCH64
/* Debug helper: print the operand list of OPCODE, preferring each
   operand's short name and falling back to its description.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7747
7748 /* This is the guts of the machine-dependent assembler. STR points to a
7749 machine dependent instruction. This function is supposed to emit
7750 the frags/bytes it assembles to. */
7751
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  const aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* opcode_lookup advances P past the mnemonic (and any condition
     suffix, recorded in inst.cond).  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* reset_aarch64_instruction clears inst.cond, which was filled in by
     opcode_lookup above, so save and restore it around the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed to parse/encode; reset the instruction
	 state before trying the next entry with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
7887
7888 /* Various frobbings of labels and their addresses. */
7889
void
aarch64_start_line_hook (void)
{
  /* Forget any label from a previous line; md_assemble only re-anchors
     the label seen on the current line.  */
  last_label_seen = NULL;
}
7895
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can move it to the (possibly
     aligned) address of the instruction that follows it.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7903
/* Called when a section changes; close any still-open instruction
   sequence so its state does not leak across sections.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7910
7911 int
7912 aarch64_data_in_code (void)
7913 {
7914 if (startswith (input_line_pointer + 1, "data:"))
7915 {
7916 *input_line_pointer = '/';
7917 input_line_pointer += 5;
7918 *input_line_pointer = 0;
7919 return 1;
7920 }
7921
7922 return 0;
7923 }
7924
/* Canonicalize NAME in place by stripping a trailing "/data" marker,
   then return NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7935 \f
7936 /* Table of all register names defined by default. The user can
7937 define additional names with .req. Note that all register names
7938 should appear in both upper and lowercase variants. Some registers
7939 also have mixed-case names. */
7940
7941 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
7942 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
7943 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7944 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
7945 #define REGSET16(p,t) \
7946 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7947 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7948 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7949 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7950 #define REGSET16S(p,s,t) \
7951 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
7952 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
7953 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
7954 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
7955 #define REGSET31(p,t) \
7956 REGSET16(p, t), \
7957 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7958 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7959 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7960 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7961 #define REGSET(p,t) \
7962 REGSET31(p,t), REGNUM(p,31,t)
7963
7964 /* These go into aarch64_reg_hsh hash-table. */
7965 static const reg_entry reg_names[] = {
7966 /* Integer registers. */
7967 REGSET31 (x, R_64), REGSET31 (X, R_64),
7968 REGSET31 (w, R_32), REGSET31 (W, R_32),
7969
7970 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7971 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7972 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7973 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7974 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7975 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7976
7977 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7978 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7979
7980 /* Floating-point single precision registers. */
7981 REGSET (s, FP_S), REGSET (S, FP_S),
7982
7983 /* Floating-point double precision registers. */
7984 REGSET (d, FP_D), REGSET (D, FP_D),
7985
7986 /* Floating-point half precision registers. */
7987 REGSET (h, FP_H), REGSET (H, FP_H),
7988
7989 /* Floating-point byte precision registers. */
7990 REGSET (b, FP_B), REGSET (B, FP_B),
7991
7992 /* Floating-point quad precision registers. */
7993 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7994
7995 /* FP/SIMD registers. */
7996 REGSET (v, VN), REGSET (V, VN),
7997
7998 /* SVE vector registers. */
7999 REGSET (z, ZN), REGSET (Z, ZN),
8000
8001 /* SVE predicate registers. */
8002 REGSET16 (p, PN), REGSET16 (P, PN),
8003
8004 /* SME ZA tile registers. */
8005 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8006
8007 /* SME ZA tile registers (horizontal slice). */
8008 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8009
8010 /* SME ZA tile registers (vertical slice). */
8011 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8012 };
8013
8014 #undef REGDEF
8015 #undef REGDEF_ALIAS
8016 #undef REGNUM
8017 #undef REGSET16
8018 #undef REGSET31
8019 #undef REGSET
8020
8021 #define N 1
8022 #define n 0
8023 #define Z 1
8024 #define z 0
8025 #define C 1
8026 #define c 0
8027 #define V 1
8028 #define v 0
8029 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
8030 static const asm_nzcv nzcv_names[] = {
8031 {"nzcv", B (n, z, c, v)},
8032 {"nzcV", B (n, z, c, V)},
8033 {"nzCv", B (n, z, C, v)},
8034 {"nzCV", B (n, z, C, V)},
8035 {"nZcv", B (n, Z, c, v)},
8036 {"nZcV", B (n, Z, c, V)},
8037 {"nZCv", B (n, Z, C, v)},
8038 {"nZCV", B (n, Z, C, V)},
8039 {"Nzcv", B (N, z, c, v)},
8040 {"NzcV", B (N, z, c, V)},
8041 {"NzCv", B (N, z, C, v)},
8042 {"NzCV", B (N, z, C, V)},
8043 {"NZcv", B (N, Z, c, v)},
8044 {"NZcV", B (N, Z, c, V)},
8045 {"NZCv", B (N, Z, C, v)},
8046 {"NZCV", B (N, Z, C, V)}
8047 };
8048
8049 #undef N
8050 #undef n
8051 #undef Z
8052 #undef z
8053 #undef C
8054 #undef c
8055 #undef V
8056 #undef v
8057 #undef B
8058 \f
8059 /* MD interface: bits in the object file. */
8060
8061 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8062 for use in the a.out file, and stores them in the array pointed to by buf.
8063 This knows about the endian-ness of the target machine and does
8064 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8065 2 (short) and 4 (long) Floating numbers are put out as a series of
8066 LITTLENUMS (shorts, here at least). */
8067
8068 void
8069 md_number_to_chars (char *buf, valueT val, int n)
8070 {
8071 if (target_big_endian)
8072 number_to_chars_bigendian (buf, val, n);
8073 else
8074 number_to_chars_littleendian (buf, val, n);
8075 }
8076
8077 /* MD interface: Sections. */
8078
8079 /* Estimate the size of a frag before relaxing. Assume everything fits in
8080 4 bytes. */
8081
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction occupies exactly 4 bytes, so the estimate
     is fixed and no relaxation variants are recorded.  */
  fragp->fr_var = 4;
  return 4;
}
8088
8089 /* Round up a section size to the appropriate boundary. */
8090
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra padding: section sizes are kept exactly as accumulated.  */
  return size;
}
8096
8097 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8098 of an rs_align_code fragment.
8099
8100 Here we fill the frag with the appropriate info for padding the
8101 output stream. The resulting frag will consist of a fixed (fr_fix)
8102 and of a repeating (fr_var) part.
8103
8104 The fixed content is always emitted before the repeating content and
8105 these two parts are used as follows in constructing the output:
8106 - the fixed part will be used to align to a valid instruction word
8107 boundary, in case that we start at a misaligned address; as no
8108 executable instruction can live at the misaligned location, we
8109 simply fill with zeros;
8110 - the variable part will be used to cover the remaining padding and
8111 we fill using the AArch64 NOP instruction.
8112
8113 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8114 enough storage space for up to 3 bytes for padding the back to a valid
8115 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8116
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total number of padding bytes this frag has to supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach 4-byte instruction alignment; these cannot
     hold an instruction, so they are zero-filled (fixed part).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is a single NOP pattern which the
     write-out code replicates to cover the remaining padding.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8154
8155 /* Perform target specific initialisation of a frag.
8156 Note - despite the name this initialisation is not done when the frag
8157 is created, but only when its type is assigned. A frag can be created
8158 and used a long time before its type is set, so beware of assuming that
8159 this initialisation is performed first. */
8160
8161 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; the ELF variant below records
     mapping-symbol state, which has no non-ELF counterpart here.  */
}
8167
8168 #else /* OBJ_ELF is defined. */
8169 void
8170 aarch64_init_frag (fragS * fragP, int max_chars)
8171 {
8172 /* Record a mapping symbol for alignment frags. We will delete this
8173 later if the alignment ends up empty. */
8174 if (!fragP->tc_frag_data.recorded)
8175 fragP->tc_frag_data.recorded = 1;
8176
8177 /* PR 21809: Do not set a mapping state for debug sections
8178 - it just confuses other tools. */
8179 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8180 return;
8181
8182 switch (fragP->fr_type)
8183 {
8184 case rs_align_test:
8185 case rs_fill:
8186 mapping_state_2 (MAP_DATA, max_chars);
8187 break;
8188 case rs_align:
8189 /* PR 20364: We can get alignment frags in code sections,
8190 so do not just assume that we should use the MAP_DATA state. */
8191 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8192 break;
8193 case rs_align_code:
8194 mapping_state_2 (MAP_INSN, max_chars);
8195 break;
8196 default:
8197 break;
8198 }
8199 }
8200 \f
8201 /* Initialize the DWARF-2 unwind information for this procedure. */
8202
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8208 #endif /* OBJ_ELF */
8209
8210 /* Convert REGNAME to a DWARF-2 register number. */
8211
8212 int
8213 tc_aarch64_regname_to_dw2regnum (char *regname)
8214 {
8215 const reg_entry *reg = parse_reg (&regname);
8216 if (reg == NULL)
8217 return -1;
8218
8219 switch (reg->type)
8220 {
8221 case REG_TYPE_SP_32:
8222 case REG_TYPE_SP_64:
8223 case REG_TYPE_R_32:
8224 case REG_TYPE_R_64:
8225 return reg->number;
8226
8227 case REG_TYPE_FP_B:
8228 case REG_TYPE_FP_H:
8229 case REG_TYPE_FP_S:
8230 case REG_TYPE_FP_D:
8231 case REG_TYPE_FP_Q:
8232 return reg->number + 64;
8233
8234 default:
8235 break;
8236 }
8237 return -1;
8238 }
8239
8240 /* Implement DWARF2_ADDR_SIZE. */
8241
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even on a 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8251
8252 /* MD interface: Symbol and relocation handling. */
8253
8254 /* Return the address within the segment that a PC-relative fixup is
8255 relative to. For AArch64 PC-relative fixups applied to instructions
8256 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8257
8258 long
8259 md_pcrel_from_section (fixS * fixP, segT seg)
8260 {
8261 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8262
8263 /* If this is pc-relative and we are going to emit a relocation
8264 then we just want to put out any pipeline compensation that the linker
8265 will need. Otherwise we want to use the calculated base. */
8266 if (fixP->fx_pcrel
8267 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8268 || aarch64_force_relocation (fixP)))
8269 base = 0;
8270
8271 /* AArch64 should be consistent for all pc-relative relocations. */
8272 return base + AARCH64_PCREL_OFFSET;
8273 }
8274
8275 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8276 Otherwise we have no need to default values of symbols. */
8277
8278 symbolS *
8279 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8280 {
8281 #ifdef OBJ_ELF
8282 if (name[0] == '_' && name[1] == 'G'
8283 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8284 {
8285 if (!GOT_symbol)
8286 {
8287 if (symbol_find (name))
8288 as_bad (_("GOT already in the symbol table"));
8289
8290 GOT_symbol = symbol_new (name, undefined_section,
8291 &zero_address_frag, 0);
8292 }
8293
8294 return GOT_symbol;
8295 }
8296 #endif
8297
8298 return 0;
8299 }
8300
8301 /* Return non-zero if the indicated VALUE has overflowed the maximum
8302 range expressible by a unsigned number with the indicated number of
8303 BITS. */
8304
8305 static bool
8306 unsigned_overflow (valueT value, unsigned bits)
8307 {
8308 valueT lim;
8309 if (bits >= sizeof (valueT) * 8)
8310 return false;
8311 lim = (valueT) 1 << bits;
8312 return (value >= lim);
8313 }
8314
8315
8316 /* Return non-zero if the indicated VALUE has overflowed the maximum
8317 range expressible by an signed number with the indicated number of
8318 BITS. */
8319
8320 static bool
8321 signed_overflow (offsetT value, unsigned bits)
8322 {
8323 offsetT lim;
8324 if (bits >= sizeof (offsetT) * 8)
8325 return false;
8326 lim = (offsetT) 1 << (bits - 1);
8327 return (value < -lim || value >= lim);
8328 }
8329
8330 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8331 unsigned immediate offset load/store instruction, try to encode it as
8332 an unscaled, 9-bit, signed immediate offset load/store instruction.
8333 Return TRUE if it is successful; otherwise return FALSE.
8334
8335 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8336 in response to the standard LDR/STR mnemonics when the immediate offset is
8337 unambiguous, i.e. when it is negative or unaligned. */
8338
8339 static bool
8340 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
8341 {
8342 int idx;
8343 enum aarch64_op new_op;
8344 const aarch64_opcode *new_opcode;
8345
8346 gas_assert (instr->opcode->iclass == ldst_pos);
8347
8348 switch (instr->opcode->op)
8349 {
8350 case OP_LDRB_POS:new_op = OP_LDURB; break;
8351 case OP_STRB_POS: new_op = OP_STURB; break;
8352 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
8353 case OP_LDRH_POS: new_op = OP_LDURH; break;
8354 case OP_STRH_POS: new_op = OP_STURH; break;
8355 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
8356 case OP_LDR_POS: new_op = OP_LDUR; break;
8357 case OP_STR_POS: new_op = OP_STUR; break;
8358 case OP_LDRF_POS: new_op = OP_LDURV; break;
8359 case OP_STRF_POS: new_op = OP_STURV; break;
8360 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
8361 case OP_PRFM_POS: new_op = OP_PRFUM; break;
8362 default: new_op = OP_NIL; break;
8363 }
8364
8365 if (new_op == OP_NIL)
8366 return false;
8367
8368 new_opcode = aarch64_get_opcode (new_op);
8369 gas_assert (new_opcode != NULL);
8370
8371 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
8372 instr->opcode->op, new_opcode->op);
8373
8374 aarch64_replace_opcode (instr, new_opcode);
8375
8376 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
8377 qualifier matching may fail because the out-of-date qualifier will
8378 prevent the operand being updated with a new and correct qualifier. */
8379 idx = aarch64_operand_index (instr->opcode->operands,
8380 AARCH64_OPND_ADDR_SIMM9);
8381 gas_assert (idx == 1);
8382 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
8383
8384 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
8385
8386 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
8387 insn_sequence))
8388 return false;
8389
8390 return true;
8391 }
8392
8393 /* Called by fix_insn to fix a MOV immediate alias instruction.
8394
8395 Operand for a generic move immediate instruction, which is an alias
8396 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8397 a 32-bit/64-bit immediate value into general register. An assembler error
8398 shall result if the immediate cannot be created by a single one of these
8399 instructions. If there is a choice, then to ensure reversability an
8400 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8401
8402 static void
8403 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8404 {
8405 const aarch64_opcode *opcode;
8406
8407 /* Need to check if the destination is SP/ZR. The check has to be done
8408 before any aarch64_replace_opcode. */
8409 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8410 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8411
8412 instr->operands[1].imm.value = value;
8413 instr->operands[1].skip = 0;
8414
8415 if (try_mov_wide_p)
8416 {
8417 /* Try the MOVZ alias. */
8418 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8419 aarch64_replace_opcode (instr, opcode);
8420 if (aarch64_opcode_encode (instr->opcode, instr,
8421 &instr->value, NULL, NULL, insn_sequence))
8422 {
8423 put_aarch64_insn (buf, instr->value);
8424 return;
8425 }
8426 /* Try the MOVK alias. */
8427 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8428 aarch64_replace_opcode (instr, opcode);
8429 if (aarch64_opcode_encode (instr->opcode, instr,
8430 &instr->value, NULL, NULL, insn_sequence))
8431 {
8432 put_aarch64_insn (buf, instr->value);
8433 return;
8434 }
8435 }
8436
8437 if (try_mov_bitmask_p)
8438 {
8439 /* Try the ORR alias. */
8440 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8441 aarch64_replace_opcode (instr, opcode);
8442 if (aarch64_opcode_encode (instr->opcode, instr,
8443 &instr->value, NULL, NULL, insn_sequence))
8444 {
8445 put_aarch64_insn (buf, instr->value);
8446 return;
8447 }
8448 }
8449
8450 as_bad_where (fixP->fx_file, fixP->fx_line,
8451 _("immediate cannot be moved by a single instruction"));
8452 }
8453
8454 /* An instruction operand which is immediate related may have symbol used
8455 in the assembly, e.g.
8456
8457 mov w0, u32
8458 .set u32, 0x00ffff00
8459
8460 At the time when the assembly instruction is parsed, a referenced symbol,
8461 like 'u32' in the above example may not have been seen; a fixS is created
8462 in such a case and is handled here after symbols have been resolved.
8463 Instruction is fixed up with VALUE using the information in *FIXP plus
8464 extra information in FLAGS.
8465
8466 This function is called by md_apply_fix to fix up instructions that need
8467 a fix-up described above but does not involve any linker-time relocation. */
8468
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Location of the instruction being fixed up.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate for SVC/HVC/BRK-style or UDF instructions;
	 patched directly into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  */
      /* Re-encode through libopcodes since the bitmask encoding is
	 non-trivial (it is a run-length encoded pattern).  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled encoding failed (e.g. negative or unaligned offset);
	     fall back to the unscaled LDUR/STUR form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8632
8633 /* Apply a fixup (fixP) to segment data, once it has been determined
8634 by our caller that we have all the info we need to fix it up.
8635
8636 Parameter valP is the pointer to the value of the bits. */
8637
8638 void
8639 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8640 {
8641 offsetT value = *valP;
8642 uint32_t insn;
8643 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8644 int scale;
8645 unsigned flags = fixP->fx_addnumber;
8646
8647 DEBUG_TRACE ("\n\n");
8648 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8649 DEBUG_TRACE ("Enter md_apply_fix");
8650
8651 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8652
8653 /* Note whether this will delete the relocation. */
8654
8655 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8656 fixP->fx_done = 1;
8657
8658 /* Process the relocations. */
8659 switch (fixP->fx_r_type)
8660 {
8661 case BFD_RELOC_NONE:
8662 /* This will need to go in the object file. */
8663 fixP->fx_done = 0;
8664 break;
8665
8666 case BFD_RELOC_8:
8667 case BFD_RELOC_8_PCREL:
8668 if (fixP->fx_done || !seg->use_rela_p)
8669 md_number_to_chars (buf, value, 1);
8670 break;
8671
8672 case BFD_RELOC_16:
8673 case BFD_RELOC_16_PCREL:
8674 if (fixP->fx_done || !seg->use_rela_p)
8675 md_number_to_chars (buf, value, 2);
8676 break;
8677
8678 case BFD_RELOC_32:
8679 case BFD_RELOC_32_PCREL:
8680 if (fixP->fx_done || !seg->use_rela_p)
8681 md_number_to_chars (buf, value, 4);
8682 break;
8683
8684 case BFD_RELOC_64:
8685 case BFD_RELOC_64_PCREL:
8686 if (fixP->fx_done || !seg->use_rela_p)
8687 md_number_to_chars (buf, value, 8);
8688 break;
8689
8690 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8691 /* We claim that these fixups have been processed here, even if
8692 in fact we generate an error because we do not have a reloc
8693 for them, so tc_gen_reloc() will reject them. */
8694 fixP->fx_done = 1;
8695 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8696 {
8697 as_bad_where (fixP->fx_file, fixP->fx_line,
8698 _("undefined symbol %s used as an immediate value"),
8699 S_GET_NAME (fixP->fx_addsy));
8700 goto apply_fix_return;
8701 }
8702 fix_insn (fixP, flags, value);
8703 break;
8704
8705 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8706 if (fixP->fx_done || !seg->use_rela_p)
8707 {
8708 if (value & 3)
8709 as_bad_where (fixP->fx_file, fixP->fx_line,
8710 _("pc-relative load offset not word aligned"));
8711 if (signed_overflow (value, 21))
8712 as_bad_where (fixP->fx_file, fixP->fx_line,
8713 _("pc-relative load offset out of range"));
8714 insn = get_aarch64_insn (buf);
8715 insn |= encode_ld_lit_ofs_19 (value >> 2);
8716 put_aarch64_insn (buf, insn);
8717 }
8718 break;
8719
8720 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8721 if (fixP->fx_done || !seg->use_rela_p)
8722 {
8723 if (signed_overflow (value, 21))
8724 as_bad_where (fixP->fx_file, fixP->fx_line,
8725 _("pc-relative address offset out of range"));
8726 insn = get_aarch64_insn (buf);
8727 insn |= encode_adr_imm (value);
8728 put_aarch64_insn (buf, insn);
8729 }
8730 break;
8731
8732 case BFD_RELOC_AARCH64_BRANCH19:
8733 if (fixP->fx_done || !seg->use_rela_p)
8734 {
8735 if (value & 3)
8736 as_bad_where (fixP->fx_file, fixP->fx_line,
8737 _("conditional branch target not word aligned"));
8738 if (signed_overflow (value, 21))
8739 as_bad_where (fixP->fx_file, fixP->fx_line,
8740 _("conditional branch out of range"));
8741 insn = get_aarch64_insn (buf);
8742 insn |= encode_cond_branch_ofs_19 (value >> 2);
8743 put_aarch64_insn (buf, insn);
8744 }
8745 break;
8746
8747 case BFD_RELOC_AARCH64_TSTBR14:
8748 if (fixP->fx_done || !seg->use_rela_p)
8749 {
8750 if (value & 3)
8751 as_bad_where (fixP->fx_file, fixP->fx_line,
8752 _("conditional branch target not word aligned"));
8753 if (signed_overflow (value, 16))
8754 as_bad_where (fixP->fx_file, fixP->fx_line,
8755 _("conditional branch out of range"));
8756 insn = get_aarch64_insn (buf);
8757 insn |= encode_tst_branch_ofs_14 (value >> 2);
8758 put_aarch64_insn (buf, insn);
8759 }
8760 break;
8761
8762 case BFD_RELOC_AARCH64_CALL26:
8763 case BFD_RELOC_AARCH64_JUMP26:
8764 if (fixP->fx_done || !seg->use_rela_p)
8765 {
8766 if (value & 3)
8767 as_bad_where (fixP->fx_file, fixP->fx_line,
8768 _("branch target not word aligned"));
8769 if (signed_overflow (value, 28))
8770 as_bad_where (fixP->fx_file, fixP->fx_line,
8771 _("branch out of range"));
8772 insn = get_aarch64_insn (buf);
8773 insn |= encode_branch_ofs_26 (value >> 2);
8774 put_aarch64_insn (buf, insn);
8775 }
8776 break;
8777
8778 case BFD_RELOC_AARCH64_MOVW_G0:
8779 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8780 case BFD_RELOC_AARCH64_MOVW_G0_S:
8781 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8782 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8783 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8784 scale = 0;
8785 goto movw_common;
8786 case BFD_RELOC_AARCH64_MOVW_G1:
8787 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8788 case BFD_RELOC_AARCH64_MOVW_G1_S:
8789 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8790 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8791 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8792 scale = 16;
8793 goto movw_common;
8794 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8795 scale = 0;
8796 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8797 /* Should always be exported to object file, see
8798 aarch64_force_relocation(). */
8799 gas_assert (!fixP->fx_done);
8800 gas_assert (seg->use_rela_p);
8801 goto movw_common;
8802 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8803 scale = 16;
8804 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8805 /* Should always be exported to object file, see
8806 aarch64_force_relocation(). */
8807 gas_assert (!fixP->fx_done);
8808 gas_assert (seg->use_rela_p);
8809 goto movw_common;
8810 case BFD_RELOC_AARCH64_MOVW_G2:
8811 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8812 case BFD_RELOC_AARCH64_MOVW_G2_S:
8813 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8814 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8815 scale = 32;
8816 goto movw_common;
8817 case BFD_RELOC_AARCH64_MOVW_G3:
8818 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8819 scale = 48;
8820 movw_common:
8821 if (fixP->fx_done || !seg->use_rela_p)
8822 {
8823 insn = get_aarch64_insn (buf);
8824
8825 if (!fixP->fx_done)
8826 {
8827 /* REL signed addend must fit in 16 bits */
8828 if (signed_overflow (value, 16))
8829 as_bad_where (fixP->fx_file, fixP->fx_line,
8830 _("offset out of range"));
8831 }
8832 else
8833 {
8834 /* Check for overflow and scale. */
8835 switch (fixP->fx_r_type)
8836 {
8837 case BFD_RELOC_AARCH64_MOVW_G0:
8838 case BFD_RELOC_AARCH64_MOVW_G1:
8839 case BFD_RELOC_AARCH64_MOVW_G2:
8840 case BFD_RELOC_AARCH64_MOVW_G3:
8841 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8842 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8843 if (unsigned_overflow (value, scale + 16))
8844 as_bad_where (fixP->fx_file, fixP->fx_line,
8845 _("unsigned value out of range"));
8846 break;
8847 case BFD_RELOC_AARCH64_MOVW_G0_S:
8848 case BFD_RELOC_AARCH64_MOVW_G1_S:
8849 case BFD_RELOC_AARCH64_MOVW_G2_S:
8850 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8851 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8852 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8853 /* NOTE: We can only come here with movz or movn. */
8854 if (signed_overflow (value, scale + 16))
8855 as_bad_where (fixP->fx_file, fixP->fx_line,
8856 _("signed value out of range"));
8857 if (value < 0)
8858 {
8859 /* Force use of MOVN. */
8860 value = ~value;
8861 insn = reencode_movzn_to_movn (insn);
8862 }
8863 else
8864 {
8865 /* Force use of MOVZ. */
8866 insn = reencode_movzn_to_movz (insn);
8867 }
8868 break;
8869 default:
8870 /* Unchecked relocations. */
8871 break;
8872 }
8873 value >>= scale;
8874 }
8875
8876 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8877 insn |= encode_movw_imm (value & 0xffff);
8878
8879 put_aarch64_insn (buf, insn);
8880 }
8881 break;
8882
8883 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8884 fixP->fx_r_type = (ilp32_p
8885 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8886 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8887 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8888 /* Should always be exported to object file, see
8889 aarch64_force_relocation(). */
8890 gas_assert (!fixP->fx_done);
8891 gas_assert (seg->use_rela_p);
8892 break;
8893
8894 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8895 fixP->fx_r_type = (ilp32_p
8896 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8897 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8898 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8899 /* Should always be exported to object file, see
8900 aarch64_force_relocation(). */
8901 gas_assert (!fixP->fx_done);
8902 gas_assert (seg->use_rela_p);
8903 break;
8904
8905 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8906 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8907 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8908 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8909 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8910 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8911 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8912 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8913 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8914 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8915 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8916 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8917 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8918 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8919 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8920 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8921 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8922 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8923 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8924 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8925 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8926 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8927 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8928 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8929 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8930 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8931 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8932 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8933 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8934 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8935 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8936 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8937 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8938 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8939 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8940 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8941 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8942 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8943 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8944 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8945 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8946 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8947 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8948 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8949 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8950 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8951 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8952 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8953 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8954 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8955 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8956 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8957 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8958 /* Should always be exported to object file, see
8959 aarch64_force_relocation(). */
8960 gas_assert (!fixP->fx_done);
8961 gas_assert (seg->use_rela_p);
8962 break;
8963
8964 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8965 /* Should always be exported to object file, see
8966 aarch64_force_relocation(). */
8967 fixP->fx_r_type = (ilp32_p
8968 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8969 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8970 gas_assert (!fixP->fx_done);
8971 gas_assert (seg->use_rela_p);
8972 break;
8973
8974 case BFD_RELOC_AARCH64_ADD_LO12:
8975 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8976 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8977 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8978 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8979 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8980 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8981 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8982 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8983 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8984 case BFD_RELOC_AARCH64_LDST128_LO12:
8985 case BFD_RELOC_AARCH64_LDST16_LO12:
8986 case BFD_RELOC_AARCH64_LDST32_LO12:
8987 case BFD_RELOC_AARCH64_LDST64_LO12:
8988 case BFD_RELOC_AARCH64_LDST8_LO12:
8989 /* Should always be exported to object file, see
8990 aarch64_force_relocation(). */
8991 gas_assert (!fixP->fx_done);
8992 gas_assert (seg->use_rela_p);
8993 break;
8994
8995 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8996 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8997 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8998 break;
8999
9000 case BFD_RELOC_UNUSED:
9001 /* An error will already have been reported. */
9002 break;
9003
9004 default:
9005 as_bad_where (fixP->fx_file, fixP->fx_line,
9006 _("unexpected %s fixup"),
9007 bfd_get_reloc_code_name (fixP->fx_r_type));
9008 break;
9009 }
9010
9011 apply_fix_return:
9012 /* Free the allocated the struct aarch64_inst.
9013 N.B. currently there are very limited number of fix-up types actually use
9014 this field, so the impact on the performance should be minimal . */
9015 free (fixP->tc_fix_data.inst);
9016
9017 return;
9018 }
9019
9020 /* Translate internal representation of relocation info to BFD target
9021 format. */
9022
9023 arelent *
9024 tc_gen_reloc (asection * section, fixS * fixp)
9025 {
9026 arelent *reloc;
9027 bfd_reloc_code_real_type code;
9028
9029 reloc = XNEW (arelent);
9030
9031 reloc->sym_ptr_ptr = XNEW (asymbol *);
9032 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9033 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9034
9035 if (fixp->fx_pcrel)
9036 {
9037 if (section->use_rela_p)
9038 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9039 else
9040 fixp->fx_offset = reloc->address;
9041 }
9042 reloc->addend = fixp->fx_offset;
9043
9044 code = fixp->fx_r_type;
9045 switch (code)
9046 {
9047 case BFD_RELOC_16:
9048 if (fixp->fx_pcrel)
9049 code = BFD_RELOC_16_PCREL;
9050 break;
9051
9052 case BFD_RELOC_32:
9053 if (fixp->fx_pcrel)
9054 code = BFD_RELOC_32_PCREL;
9055 break;
9056
9057 case BFD_RELOC_64:
9058 if (fixp->fx_pcrel)
9059 code = BFD_RELOC_64_PCREL;
9060 break;
9061
9062 default:
9063 break;
9064 }
9065
9066 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9067 if (reloc->howto == NULL)
9068 {
9069 as_bad_where (fixp->fx_file, fixp->fx_line,
9070 _
9071 ("cannot represent %s relocation in this object file format"),
9072 bfd_get_reloc_code_name (code));
9073 return NULL;
9074 }
9075
9076 return reloc;
9077 }
9078
9079 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9080
9081 void
9082 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9083 {
9084 bfd_reloc_code_real_type type;
9085 int pcrel = 0;
9086
9087 /* Pick a reloc.
9088 FIXME: @@ Should look at CPU word size. */
9089 switch (size)
9090 {
9091 case 1:
9092 type = BFD_RELOC_8;
9093 break;
9094 case 2:
9095 type = BFD_RELOC_16;
9096 break;
9097 case 4:
9098 type = BFD_RELOC_32;
9099 break;
9100 case 8:
9101 type = BFD_RELOC_64;
9102 break;
9103 default:
9104 as_bad (_("cannot do %u-byte relocation"), size);
9105 type = BFD_RELOC_UNUSED;
9106 break;
9107 }
9108
9109 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9110 }
9111
9112 #ifdef OBJ_ELF
9113
9114 /* Implement md_after_parse_args. This is the earliest time we need to decide
9115 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9116
9117 void
9118 aarch64_after_parse_args (void)
9119 {
9120 if (aarch64_abi != AARCH64_ABI_NONE)
9121 return;
9122
9123 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9124 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9125 aarch64_abi = AARCH64_ABI_ILP32;
9126 else
9127 aarch64_abi = AARCH64_ABI_LP64;
9128 }
9129
9130 const char *
9131 elf64_aarch64_target_format (void)
9132 {
9133 #ifdef TE_CLOUDABI
9134 /* FIXME: What to do for ilp32_p ? */
9135 if (target_big_endian)
9136 return "elf64-bigaarch64-cloudabi";
9137 else
9138 return "elf64-littleaarch64-cloudabi";
9139 #else
9140 if (target_big_endian)
9141 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9142 else
9143 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9144 #endif
9145 }
9146
/* Per-symbol hook: simply forward SYMP and PUNTP to the generic ELF
   symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9152 #endif
9153
9154 /* MD interface: Finalization. */
9155
9156 /* A good place to do this, although this was probably not intended
9157 for this kind of use. We need to dump the literal pool before
9158 references are made to a null symbol pointer. */
9159
9160 void
9161 aarch64_cleanup (void)
9162 {
9163 literal_pool *pool;
9164
9165 for (pool = list_of_pools; pool; pool = pool->next)
9166 {
9167 /* Put it at the end of the relevant section. */
9168 subseg_set (pool->section, pool->sub_section);
9169 s_ltorg (0);
9170 }
9171 }
9172
9173 #ifdef OBJ_ELF
9174 /* Remove any excess mapping symbols generated for alignment frags in
9175 SEC. We may have created a mapping symbol before a zero byte
9176 alignment; remove it if there's a mapping symbol after the
9177 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, considering the last mapping
     symbol recorded for each frag (if any).  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with NEXT.  Scan forward over
	 zero-length frags to decide whether SYM is made redundant by a
	 following mapping symbol (or by the end of the section).  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9237 #endif
9238
/* Adjust the symbol table.  On ELF targets, prune redundant mapping
   symbols created around alignment frags before running the generic
   ELF pass.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9251
/* Insert KEY -> VALUE into TABLE.  The final 0 argument means an
   existing entry for KEY is left in place; the result of
   str_hash_insert is not checked.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9257
9258 static void
9259 sysreg_hash_insert (htab_t table, const char *key, void *value)
9260 {
9261 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
9262 checked_hash_insert (table, key, value);
9263 }
9264
9265 static void
9266 fill_instruction_hash_table (void)
9267 {
9268 const aarch64_opcode *opcode = aarch64_opcode_table;
9269
9270 while (opcode->name != NULL)
9271 {
9272 templates *templ, *new_templ;
9273 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9274
9275 new_templ = XNEW (templates);
9276 new_templ->opcode = opcode;
9277 new_templ->next = NULL;
9278
9279 if (!templ)
9280 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9281 else
9282 {
9283 new_templ->next = templ->next;
9284 templ->next = new_templ;
9285 }
9286 ++opcode;
9287 }
9288 }
9289
9290 static inline void
9291 convert_to_upper (char *dst, const char *src, size_t num)
9292 {
9293 unsigned int i;
9294 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9295 *dst = TOUPPER (*src);
9296 *dst = '\0';
9297 }
9298
9299 /* Assume STR point to a lower-case string, allocate, convert and return
9300 the corresponding upper-case string. */
9301 static inline const char*
9302 get_upper_str (const char *str)
9303 {
9304 char *ret;
9305 size_t len = strlen (str);
9306 ret = XNEWVEC (char, len + 1);
9307 convert_to_upper (ret, str, len);
9308 return ret;
9309 }
9310
9311 /* MD interface: Initialization. */
9312
/* One-time assembler initialization: create and populate every lookup
   table used by the parsers, resolve the command-line CPU selection
   into cpu_variant, and record the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the (initially empty) hash tables.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and related name tables; sysreg_hash_insert
     asserts each name fits AARCH64_MAX_SYSREG_NAME_LEN.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  -mcpu
     takes precedence; fall back to -march, then to the build-time
     default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9471
9472 /* Command line processing. */
9473
/* Short options: every -m<...> argument is routed through 'm'.  */
const char *md_shortopts = "m:";

/* Endianness options: only define the one(s) this target supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9498
/* A simple on/off command-line option: when OPTION matches, *VAR is
   set to VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9507
/* Generic -m option toggles.  Terminated by an all-NULL entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9521
/* An entry in the CPU selection table: the NAME accepted on the
   command line and the feature set (VALUE) it enables.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9530
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry gives the feature set implied by the
   CPU name and (optionally) a canonical spelling; a NULL canonical
   name means "use the name converted to upper case".  The table is
   terminated by a NULL name.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9690
/* An architecture name and the feature set it enables.  */
struct aarch64_arch_option_table
{
  const char *name;			/* Architecture name, e.g. "armv8.2-a".  */
  const aarch64_feature_set value;	/* Features implied by this architecture.  */
};
9696
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.
   NOTE: the "all" entry must stay first; s_aarch64_arch deliberately
   starts its search at aarch64_archs + 1 to skip it.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r",	AARCH64_ARCH_V8_R},
  {"armv9-a",	AARCH64_ARCH_V9},
  {NULL, AARCH64_ARCH_NONE}		/* Terminator.  */
};
9713
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Extension name, e.g. "sve2".  */
  const aarch64_feature_set value;	/* Features this extension enables.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9721
/* Architectural extensions accepted in "+ext" / "+noext" suffixes and by
   the .arch_extension directive.  VALUE is the feature set the extension
   enables; REQUIRE lists its direct dependencies, which are closed over
   transitively by aarch64_feature_enable_set / aarch64_feature_disable_set.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP
		    | AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD
		    | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_ARCH_NONE},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_ARCH_NONE},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_ARCH_NONE},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_ARCH_NONE},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_ARCH_NONE},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_ARCH_NONE},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},	/* Terminator.  */
};
9819
/* A long command-line option (e.g. "-mcpu=") together with its help text
   and the parser for the text following the '='.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9827
9828 /* Transitive closure of features depending on set. */
9829 static aarch64_feature_set
9830 aarch64_feature_disable_set (aarch64_feature_set set)
9831 {
9832 const struct aarch64_option_cpu_value_table *opt;
9833 aarch64_feature_set prev = 0;
9834
9835 while (prev != set) {
9836 prev = set;
9837 for (opt = aarch64_features; opt->name != NULL; opt++)
9838 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9839 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9840 }
9841 return set;
9842 }
9843
9844 /* Transitive closure of dependencies of set. */
9845 static aarch64_feature_set
9846 aarch64_feature_enable_set (aarch64_feature_set set)
9847 {
9848 const struct aarch64_option_cpu_value_table *opt;
9849 aarch64_feature_set prev = 0;
9850
9851 while (prev != set) {
9852 prev = set;
9853 for (opt = aarch64_features; opt->name != NULL; opt++)
9854 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9855 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9856 }
9857 return set;
9858 }
9859
9860 static int
9861 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9862 bool ext_only)
9863 {
9864 /* We insist on extensions being added before being removed. We achieve
9865 this by using the ADDING_VALUE variable to indicate whether we are
9866 adding an extension (1) or removing it (0) and only allowing it to
9867 change in the order -1 -> 1 -> 0. */
9868 int adding_value = -1;
9869 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9870
9871 /* Copy the feature set, so that we can modify it. */
9872 *ext_set = **opt_p;
9873 *opt_p = ext_set;
9874
9875 while (str != NULL && *str != 0)
9876 {
9877 const struct aarch64_option_cpu_value_table *opt;
9878 const char *ext = NULL;
9879 int optlen;
9880
9881 if (!ext_only)
9882 {
9883 if (*str != '+')
9884 {
9885 as_bad (_("invalid architectural extension"));
9886 return 0;
9887 }
9888
9889 ext = strchr (++str, '+');
9890 }
9891
9892 if (ext != NULL)
9893 optlen = ext - str;
9894 else
9895 optlen = strlen (str);
9896
9897 if (optlen >= 2 && startswith (str, "no"))
9898 {
9899 if (adding_value != 0)
9900 adding_value = 0;
9901 optlen -= 2;
9902 str += 2;
9903 }
9904 else if (optlen > 0)
9905 {
9906 if (adding_value == -1)
9907 adding_value = 1;
9908 else if (adding_value != 1)
9909 {
9910 as_bad (_("must specify extensions to add before specifying "
9911 "those to remove"));
9912 return false;
9913 }
9914 }
9915
9916 if (optlen == 0)
9917 {
9918 as_bad (_("missing architectural extension"));
9919 return 0;
9920 }
9921
9922 gas_assert (adding_value != -1);
9923
9924 for (opt = aarch64_features; opt->name != NULL; opt++)
9925 if (strncmp (opt->name, str, optlen) == 0)
9926 {
9927 aarch64_feature_set set;
9928
9929 /* Add or remove the extension. */
9930 if (adding_value)
9931 {
9932 set = aarch64_feature_enable_set (opt->value);
9933 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9934 }
9935 else
9936 {
9937 set = aarch64_feature_disable_set (opt->value);
9938 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9939 }
9940 break;
9941 }
9942
9943 if (opt->name == NULL)
9944 {
9945 as_bad (_("unknown architectural extension `%s'"), str);
9946 return 0;
9947 }
9948
9949 str = ext;
9950 };
9951
9952 return 1;
9953 }
9954
9955 static int
9956 aarch64_parse_cpu (const char *str)
9957 {
9958 const struct aarch64_cpu_option_table *opt;
9959 const char *ext = strchr (str, '+');
9960 size_t optlen;
9961
9962 if (ext != NULL)
9963 optlen = ext - str;
9964 else
9965 optlen = strlen (str);
9966
9967 if (optlen == 0)
9968 {
9969 as_bad (_("missing cpu name `%s'"), str);
9970 return 0;
9971 }
9972
9973 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9974 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9975 {
9976 mcpu_cpu_opt = &opt->value;
9977 if (ext != NULL)
9978 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
9979
9980 return 1;
9981 }
9982
9983 as_bad (_("unknown cpu `%s'"), str);
9984 return 0;
9985 }
9986
9987 static int
9988 aarch64_parse_arch (const char *str)
9989 {
9990 const struct aarch64_arch_option_table *opt;
9991 const char *ext = strchr (str, '+');
9992 size_t optlen;
9993
9994 if (ext != NULL)
9995 optlen = ext - str;
9996 else
9997 optlen = strlen (str);
9998
9999 if (optlen == 0)
10000 {
10001 as_bad (_("missing architecture name `%s'"), str);
10002 return 0;
10003 }
10004
10005 for (opt = aarch64_archs; opt->name != NULL; opt++)
10006 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10007 {
10008 march_cpu_opt = &opt->value;
10009 if (ext != NULL)
10010 return aarch64_parse_features (ext, &march_cpu_opt, false);
10011
10012 return 1;
10013 }
10014
10015 as_bad (_("unknown architecture `%s'\n"), str);
10016 return 0;
10017 }
10018
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name, e.g. "lp64".  */
  enum aarch64_abi_type value;	/* Corresponding ABI enumerator.  */
};
10025
/* Recognized -mabi= values.  No terminator entry: this table is walked
   with ARRAY_SIZE in aarch64_parse_abi.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
10030
10031 static int
10032 aarch64_parse_abi (const char *str)
10033 {
10034 unsigned int i;
10035
10036 if (str[0] == '\0')
10037 {
10038 as_bad (_("missing abi name `%s'"), str);
10039 return 0;
10040 }
10041
10042 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10043 if (strcmp (str, aarch64_abis[i].name) == 0)
10044 {
10045 aarch64_abi = aarch64_abis[i].value;
10046 return 1;
10047 }
10048
10049 as_bad (_("unknown abi `%s'\n"), str);
10050 return 0;
10051 }
10052
/* Long options handled by md_parse_option; matched on the leading
   substring (up to and including the '='), with the remainder passed
   to the per-option parser.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}		/* Terminator.  */
};
10064
/* GAS hook: handle a target-specific command-line option.  C is the
   option character; ARG is its argument, or NULL.  Return 1 if the
   option was consumed here, 0 if it is not recognized.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the table of simple flag options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the long options (e.g. -mcpu=...).  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The "- 1" compensates for
		 ARG lacking the leading option character already matched
		 as C.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10131
/* GAS hook: print the target-specific option summary for --help to FP.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
10158
10159 /* Parse a .cpu directive. */
10160
10161 static void
10162 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10163 {
10164 const struct aarch64_cpu_option_table *opt;
10165 char saved_char;
10166 char *name;
10167 char *ext;
10168 size_t optlen;
10169
10170 name = input_line_pointer;
10171 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10172 input_line_pointer++;
10173 saved_char = *input_line_pointer;
10174 *input_line_pointer = 0;
10175
10176 ext = strchr (name, '+');
10177
10178 if (ext != NULL)
10179 optlen = ext - name;
10180 else
10181 optlen = strlen (name);
10182
10183 /* Skip the first "all" entry. */
10184 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10185 if (strlen (opt->name) == optlen
10186 && strncmp (name, opt->name, optlen) == 0)
10187 {
10188 mcpu_cpu_opt = &opt->value;
10189 if (ext != NULL)
10190 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10191 return;
10192
10193 cpu_variant = *mcpu_cpu_opt;
10194
10195 *input_line_pointer = saved_char;
10196 demand_empty_rest_of_line ();
10197 return;
10198 }
10199 as_bad (_("unknown cpu `%s'"), name);
10200 *input_line_pointer = saved_char;
10201 ignore_rest_of_line ();
10202 }
10203
10204
10205 /* Parse a .arch directive. */
10206
10207 static void
10208 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10209 {
10210 const struct aarch64_arch_option_table *opt;
10211 char saved_char;
10212 char *name;
10213 char *ext;
10214 size_t optlen;
10215
10216 name = input_line_pointer;
10217 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10218 input_line_pointer++;
10219 saved_char = *input_line_pointer;
10220 *input_line_pointer = 0;
10221
10222 ext = strchr (name, '+');
10223
10224 if (ext != NULL)
10225 optlen = ext - name;
10226 else
10227 optlen = strlen (name);
10228
10229 /* Skip the first "all" entry. */
10230 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10231 if (strlen (opt->name) == optlen
10232 && strncmp (name, opt->name, optlen) == 0)
10233 {
10234 mcpu_cpu_opt = &opt->value;
10235 if (ext != NULL)
10236 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10237 return;
10238
10239 cpu_variant = *mcpu_cpu_opt;
10240
10241 *input_line_pointer = saved_char;
10242 demand_empty_rest_of_line ();
10243 return;
10244 }
10245
10246 as_bad (_("unknown architecture `%s'\n"), name);
10247 *input_line_pointer = saved_char;
10248 ignore_rest_of_line ();
10249 }
10250
10251 /* Parse a .arch_extension directive. */
10252
10253 static void
10254 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10255 {
10256 char saved_char;
10257 char *ext = input_line_pointer;;
10258
10259 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10260 input_line_pointer++;
10261 saved_char = *input_line_pointer;
10262 *input_line_pointer = 0;
10263
10264 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10265 return;
10266
10267 cpu_variant = *mcpu_cpu_opt;
10268
10269 *input_line_pointer = saved_char;
10270 demand_empty_rest_of_line ();
10271 }
10272
/* Copy symbol information.  Propagates the AArch64-specific symbol flag
   word from SRC to DEST.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10280
10281 #ifdef OBJ_ELF
10282 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10283 This is needed so AArch64 specific st_other values can be independently
10284 specified for an IFUNC resolver (that is called by the dynamic linker)
10285 and the symbol it resolves (aliased to the resolver). In particular,
10286 if a function symbol has special st_other value set via directives,
10287 then attaching an IFUNC resolver to that symbol should not override
10288 the st_other setting. Requiring the directive on the IFUNC resolver
10289 symbol would be unexpected and problematic in C code, where the two
10290 symbols appear as two independent function declarations. */
10291
10292 void
10293 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10294 {
10295 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10296 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10297 if (srcelf->size)
10298 {
10299 if (destelf->size == NULL)
10300 destelf->size = XNEW (expressionS);
10301 *destelf->size = *srcelf->size;
10302 }
10303 else
10304 {
10305 free (destelf->size);
10306 destelf->size = NULL;
10307 }
10308 S_SET_SIZE (dest, S_GET_SIZE (src));
10309 }
10310 #endif