aarch64-pe support for LD, GAS and BFD
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #endif
34
35 #include "dw2gencfi.h"
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64 #endif
65
66 /* Which ABI to use. */
67 enum aarch64_abi_type
68 {
69 AARCH64_ABI_NONE = 0,
70 AARCH64_ABI_LP64 = 1,
71 AARCH64_ABI_ILP32 = 2,
72 AARCH64_ABI_LLP64 = 3
73 };
74
75 #ifndef DEFAULT_ARCH
76 #define DEFAULT_ARCH "aarch64"
77 #endif
78
79 #ifdef OBJ_ELF
80 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
81 static const char *default_arch = DEFAULT_ARCH;
82 #endif
83
84 /* AArch64 ABI for the output file. */
85 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
86
87 /* When non-zero, program to a 32-bit model, in which the C data types
88 int, long and all pointer types are 32-bit objects (ILP32); or to a
89 64-bit model, in which the C int type is 32-bits but the C long type
90 and all pointer types are 64-bit objects (LP64). */
91 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
92
/* When non-zero, the C types int and long are 32-bit, while pointers
   are 64-bit (LLP64).  */
95 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
96
97 enum vector_el_type
98 {
99 NT_invtype = -1,
100 NT_b,
101 NT_h,
102 NT_s,
103 NT_d,
104 NT_q,
105 NT_zero,
106 NT_merge
107 };
108
109 /* SME horizontal or vertical slice indicator, encoded in "V".
110 Values:
111 0 - Horizontal
112 1 - vertical
113 */
114 enum sme_hv_slice
115 {
116 HV_horizontal = 0,
117 HV_vertical = 1
118 };
119
120 /* Bits for DEFINED field in vector_type_el. */
121 #define NTA_HASTYPE 1
122 #define NTA_HASINDEX 2
123 #define NTA_HASVARWIDTH 4
124
125 struct vector_type_el
126 {
127 enum vector_el_type type;
128 unsigned char defined;
129 unsigned width;
130 int64_t index;
131 };
132
133 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
134
135 struct reloc
136 {
137 bfd_reloc_code_real_type type;
138 expressionS exp;
139 int pc_rel;
140 enum aarch64_opnd opnd;
141 uint32_t flags;
142 unsigned need_libopcodes_p : 1;
143 };
144
145 struct aarch64_instruction
146 {
147 /* libopcodes structure for instruction intermediate representation. */
148 aarch64_inst base;
149 /* Record assembly errors found during the parsing. */
150 struct
151 {
152 enum aarch64_operand_error_kind kind;
153 const char *error;
154 } parsing_error;
155 /* The condition that appears in the assembly line. */
156 int cond;
157 /* Relocation information (including the GAS internal fixup). */
158 struct reloc reloc;
159 /* Need to generate an immediate in the literal pool. */
160 unsigned gen_lit_pool : 1;
161 };
162
163 typedef struct aarch64_instruction aarch64_instruction;
164
165 static aarch64_instruction inst;
166
167 static bool parse_operands (char *, const aarch64_opcode *);
168 static bool programmer_friendly_fixup (aarch64_instruction *);
169
170 /* Diagnostics inline function utilities.
171
172 These are lightweight utilities which should only be called by parse_operands
173 and other parsers. GAS processes each assembly line by parsing it against
174 instruction template(s), in the case of multiple templates (for the same
175 mnemonic name), those templates are tried one by one until one succeeds or
176 all fail. An assembly line may fail a few templates before being
177 successfully parsed; an error saved here in most cases is not a user error
178 but an error indicating the current template is not the right template.
179 Therefore it is very important that errors can be saved at a low cost during
180 the parsing; we don't want to slow down the whole parsing by recording
181 non-user errors in detail.
182
183 Remember that the objective is to help GAS pick up the most appropriate
184 error message in the case of multiple templates, e.g. FMOV which has 8
185 templates. */
186
187 static inline void
188 clear_error (void)
189 {
190 inst.parsing_error.kind = AARCH64_OPDE_NIL;
191 inst.parsing_error.error = NULL;
192 }
193
194 static inline bool
195 error_p (void)
196 {
197 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
198 }
199
200 static inline const char *
201 get_error_message (void)
202 {
203 return inst.parsing_error.error;
204 }
205
206 static inline enum aarch64_operand_error_kind
207 get_error_kind (void)
208 {
209 return inst.parsing_error.kind;
210 }
211
212 static inline void
213 set_error (enum aarch64_operand_error_kind kind, const char *error)
214 {
215 inst.parsing_error.kind = kind;
216 inst.parsing_error.error = error;
217 }
218
/* Record ERROR as a recoverable diagnostic (AARCH64_OPDE_RECOVERABLE),
   overwriting any previously recorded error.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
224
225 /* Use the DESC field of the corresponding aarch64_operand entry to compose
226 the error message. */
static inline void
set_default_error (void)
{
  /* A NULL message makes the reporting code fall back to the operand's
     DESC-based default diagnostic.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
232
/* Record ERROR as a syntax error for the current instruction,
   unconditionally overwriting any previously recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
238
239 static inline void
240 set_first_syntax_error (const char *error)
241 {
242 if (! error_p ())
243 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
244 }
245
/* Record ERROR as a fatal syntax error (AARCH64_OPDE_FATAL_SYNTAX_ERROR),
   overwriting any previously recorded error.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
251 \f
252 /* Return value for certain parsers when the parsing fails; those parsers
253 return the information of the parsed result, e.g. register number, on
254 success. */
255 #define PARSE_FAIL -1
256
257 /* This is an invalid condition code that means no conditional field is
258 present. */
259 #define COND_ALWAYS 0x10
260
261 typedef struct
262 {
263 const char *template;
264 uint32_t value;
265 } asm_nzcv;
266
267 struct reloc_entry
268 {
269 char *name;
270 bfd_reloc_code_real_type reloc;
271 };
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(VN) /* v[0-31] */ \
290 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
291 BASIC_REG_TYPE(PN) /* p[0-15] */ \
292 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
293 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
294 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
295 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
296 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
297 /* Typecheck: same, plus SVE registers. */ \
298 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
301 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
303 /* Typecheck: same, plus SVE registers. */ \
304 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
306 | REG_TYPE(ZN)) \
307 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
308 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
309 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
310 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
314 /* Typecheck: any [BHSDQ]P FP. */ \
315 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
318 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
322 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
323 be used for SVE instructions, since Zn and Pn are valid symbols \
324 in other contexts. */ \
325 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
328 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
329 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
330 | REG_TYPE(ZN) | REG_TYPE(PN)) \
331 /* Any integer register; used for error messages only. */ \
332 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
333 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
334 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
335 /* Pseudo type to mark the end of the enumerator sequence. */ \
336 BASIC_REG_TYPE(MAX)
337
338 #undef BASIC_REG_TYPE
339 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
342
343 /* Register type enumerators. */
344 typedef enum aarch64_reg_type_
345 {
346 /* A list of REG_TYPE_*. */
347 AARCH64_REG_TYPES
348 } aarch64_reg_type;
349
350 #undef BASIC_REG_TYPE
351 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
352 #undef REG_TYPE
353 #define REG_TYPE(T) (1 << REG_TYPE_##T)
354 #undef MULTI_REG_TYPE
355 #define MULTI_REG_TYPE(T,V) V,
356
357 /* Structure for a hash table entry for a register. */
358 typedef struct
359 {
360 const char *name;
361 unsigned char number;
362 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
363 unsigned char builtin;
364 } reg_entry;
365
366 /* Values indexed by aarch64_reg_type to assist the type checking. */
367 static const unsigned reg_type_masks[] =
368 {
369 AARCH64_REG_TYPES
370 };
371
372 #undef BASIC_REG_TYPE
373 #undef REG_TYPE
374 #undef MULTI_REG_TYPE
375 #undef AARCH64_REG_TYPES
376
377 /* Diagnostics used when we don't get a register of the expected type.
378 Note: this has to synchronized with aarch64_reg_type definitions
379 above. */
380 static const char *
381 get_reg_expected_msg (aarch64_reg_type reg_type)
382 {
383 const char *msg;
384
385 switch (reg_type)
386 {
387 case REG_TYPE_R_32:
388 msg = N_("integer 32-bit register expected");
389 break;
390 case REG_TYPE_R_64:
391 msg = N_("integer 64-bit register expected");
392 break;
393 case REG_TYPE_R_N:
394 msg = N_("integer register expected");
395 break;
396 case REG_TYPE_R64_SP:
397 msg = N_("64-bit integer or SP register expected");
398 break;
399 case REG_TYPE_SVE_BASE:
400 msg = N_("base register expected");
401 break;
402 case REG_TYPE_R_Z:
403 msg = N_("integer or zero register expected");
404 break;
405 case REG_TYPE_SVE_OFFSET:
406 msg = N_("offset register expected");
407 break;
408 case REG_TYPE_R_SP:
409 msg = N_("integer or SP register expected");
410 break;
411 case REG_TYPE_R_Z_SP:
412 msg = N_("integer, zero or SP register expected");
413 break;
414 case REG_TYPE_FP_B:
415 msg = N_("8-bit SIMD scalar register expected");
416 break;
417 case REG_TYPE_FP_H:
418 msg = N_("16-bit SIMD scalar or floating-point half precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_S:
422 msg = N_("32-bit SIMD scalar or floating-point single precision "
423 "register expected");
424 break;
425 case REG_TYPE_FP_D:
426 msg = N_("64-bit SIMD scalar or floating-point double precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_Q:
430 msg = N_("128-bit SIMD scalar or floating-point quad precision "
431 "register expected");
432 break;
433 case REG_TYPE_R_Z_BHSDQ_V:
434 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
435 msg = N_("register expected");
436 break;
437 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
438 msg = N_("SIMD scalar or floating-point register expected");
439 break;
440 case REG_TYPE_VN: /* any V reg */
441 msg = N_("vector register expected");
442 break;
443 case REG_TYPE_ZN:
444 msg = N_("SVE vector register expected");
445 break;
446 case REG_TYPE_PN:
447 msg = N_("SVE predicate register expected");
448 break;
449 default:
450 as_fatal (_("invalid register type %d"), reg_type);
451 }
452 return msg;
453 }
454
455 /* Some well known registers that we refer to directly elsewhere. */
456 #define REG_SP 31
457 #define REG_ZR 31
458
459 /* Instructions take 4 bytes in the object file. */
460 #define INSN_SIZE 4
461
462 static htab_t aarch64_ops_hsh;
463 static htab_t aarch64_cond_hsh;
464 static htab_t aarch64_shift_hsh;
465 static htab_t aarch64_sys_regs_hsh;
466 static htab_t aarch64_pstatefield_hsh;
467 static htab_t aarch64_sys_regs_ic_hsh;
468 static htab_t aarch64_sys_regs_dc_hsh;
469 static htab_t aarch64_sys_regs_at_hsh;
470 static htab_t aarch64_sys_regs_tlbi_hsh;
471 static htab_t aarch64_sys_regs_sr_hsh;
472 static htab_t aarch64_reg_hsh;
473 static htab_t aarch64_barrier_opt_hsh;
474 static htab_t aarch64_nzcv_hsh;
475 static htab_t aarch64_pldop_hsh;
476 static htab_t aarch64_hint_opt_hsh;
477
478 /* Stuff needed to resolve the label ambiguity
479 As:
480 ...
481 label: <insn>
482 may differ from:
483 ...
484 label:
485 <insn> */
486
487 static symbolS *last_label_seen;
488
489 /* Literal pool structure. Held on a per-section
490 and per-sub-section basis. */
491
492 #define MAX_LITERAL_POOL_SIZE 1024
493 typedef struct literal_expression
494 {
495 expressionS exp;
496 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
497 LITTLENUM_TYPE * bignum;
498 } literal_expression;
499
500 typedef struct literal_pool
501 {
502 literal_expression literals[MAX_LITERAL_POOL_SIZE];
503 unsigned int next_free_entry;
504 unsigned int id;
505 symbolS *symbol;
506 segT section;
507 subsegT sub_section;
508 int size;
509 struct literal_pool *next;
510 } literal_pool;
511
512 /* Pointer to a linked list of literal pools. */
513 static literal_pool *list_of_pools = NULL;
514 \f
515 /* Pure syntax. */
516
517 /* This array holds the chars that always start a comment. If the
518 pre-processor is disabled, these aren't very useful. */
519 const char comment_chars[] = "";
520
521 /* This array holds the chars that only start a comment at the beginning of
522 a line. If the line seems to have the form '# 123 filename'
523 .line and .file directives will appear in the pre-processed output. */
524 /* Note that input_file.c hand checks for '#' at the beginning of the
525 first line of the input file. This is because the compiler outputs
526 #NO_APP at the beginning of its output. */
527 /* Also note that comments like this one will always work. */
528 const char line_comment_chars[] = "#";
529
530 const char line_separator_chars[] = ";";
531
532 /* Chars that can be used to separate mant
533 from exp in floating point numbers. */
534 const char EXP_CHARS[] = "eE";
535
536 /* Chars that mean this number is a floating point constant. */
537 /* As in 0f12.456 */
538 /* or 0d1.2345e12 */
539
540 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
541
542 /* Prefix character that indicates the start of an immediate value. */
543 #define is_immediate_prefix(C) ((C) == '#')
544
545 /* Separator character handling. */
546
547 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
548
/* If *STR currently points at character C, consume it (advancing *STR
   by one) and return TRUE; otherwise leave *STR unchanged and return
   FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
560
561 #define skip_past_comma(str) skip_past_char (str, ',')
562
563 /* Arithmetic expressions (possibly involving symbols). */
564
565 static bool in_aarch64_get_expression = false;
566
567 /* Third argument to aarch64_get_expression. */
568 #define GE_NO_PREFIX false
569 #define GE_OPT_PREFIX true
570
571 /* Fourth argument to aarch64_get_expression. */
572 #define ALLOW_ABSENT false
573 #define REJECT_ABSENT true
574
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () operates on input_line_pointer, so temporarily
     redirect it to our string, restoring it on every exit path.  The
     in_aarch64_get_expression flag lets md_operand () flag bad
     expressions as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand ().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand must be an expression, so its failure is
	 fatal; otherwise only record the error if it is the first.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
641
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Defer to the generic IEEE encoder, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
652
653 /* We handle all bad expressions here, so that we can report the faulty
654 instruction in the error message. */
655 void
656 md_operand (expressionS * exp)
657 {
658 if (in_aarch64_get_expression)
659 exp->X_op = O_illegal;
660 }
661
/* Immediate values.  */

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which
   is set will be the most meaningful.  Calling this function avoids
   overwriting it with later (cascading) errors.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
675
/* Similar to first_error, but this function accepts formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only format and record the message when it is the first error for
     this assembly line.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The message is expected to fit in the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
700
/* Register parsing.  */

/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  p = start;
  /* A register name must start with a letter.  */
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan over the rest of the name: letters, digits and underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate name up in the register hash table.  */
  reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
739
740 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
741 return FALSE. */
742 static bool
743 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
744 {
745 return (reg_type_masks[type] & (1 << reg->type)) != 0;
746 }
747
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit general-purpose register, WSP or WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit general-purpose register, SP or XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE vector register is only accepted when REG_TYPE allows it
	 and a ".s" or ".d" element size suffix follows.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s" / ".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
805
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept W/X registers, WSP/SP and WZR/XZR, but no SVE registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
817
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector (Zn) and predicate (Pn) registers take a bare element
     size with no leading element count; WIDTH == 0 encodes "no count".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only accepted for SVE registers or as the single-element
	 form "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A non-zero count must describe a full 64-bit or 128-bit vector, or
     one of the half-width forms 2h / 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
903
904 /* *STR contains an SVE zero/merge predication suffix. Parse it into
905 *PARSED_TYPE and point *STR at the end of the suffix. */
906
907 static bool
908 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
909 {
910 char *ptr = *str;
911
912 /* Skip '/'. */
913 gas_assert (*ptr == '/');
914 ptr++;
915 switch (TOLOWER (*ptr))
916 {
917 case 'z':
918 parsed_type->type = NT_zero;
919 break;
920 case 'm':
921 parsed_type->type = NT_merge;
922 break;
923 default:
924 if (*ptr != '\0' && *ptr != ',')
925 first_error_fmt (_("unexpected character `%c' in predication type"),
926 *ptr);
927 else
928 first_error (_("missing predication type"));
929 return false;
930 }
931 parsed_type->width = 0;
932 *str = ptr + 1;
933 return true;
934 }
935
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Assume no shape/index information until proven otherwise.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's actual basic type.  */
  type = reg->type;

  /* Vn/Zn may carry a ".<type>" suffix; Pn may carry either a ".<type>"
     or a "/[zm]" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1071
1072 /* Parse register.
1073
1074 Return the register number on success; return PARSE_FAIL otherwise.
1075
1076 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1077 the register (e.g. NEON double or quad reg when either has been requested).
1078
1079 If this is a NEON vector register with additional type information, fill
1080 in the struct pointed to by VECTYPE (if non-NULL).
1081
1082 This parser does not handle register list. */
1083
1084 static int
1085 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1086 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1087 {
1088 struct vector_type_el atype;
1089 char *str = *ccp;
1090 int reg = parse_typed_reg (&str, type, rtype, &atype,
1091 /*in_reg_list= */ false);
1092
1093 if (reg == PARSE_FAIL)
1094 return PARSE_FAIL;
1095
1096 if (vectype)
1097 *vectype = atype;
1098
1099 *ccp = str;
1100
1101 return reg;
1102 }
1103
1104 static inline bool
1105 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1106 {
1107 return
1108 e1.type == e2.type
1109 && e1.defined == e2.defined
1110 && e1.width == e2.width && e1.index == e2.index;
1111 }
1112
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  /* A register list is always brace-enclosed.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set by the loop condition below when the previous
	 register was followed by '-': VAL then holds the range's low
	 bound and the next register parsed is its high bound.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* If any element carries an index, the whole list must end with
	 one (checked after the closing brace below).  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Ranges must be ascending.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* Registers VAL_RANGE-1..VAL-1 were already accumulated when
	     the low bound was parsed; only add VAL_RANGE..VAL now.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number into its 5-bit slot of RET_VAL.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Note the comma operator: when no comma follows, tentatively set
     IN_RANGE and keep looping only if the next character is '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the common trailing index, e.g. the "[1]" in "{v0.s-v3.s}[1]".  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  /* The input pointer is advanced even on error so the caller can
     resynchronize past the malformed list.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1273
1274 /* Directives: register aliases. */
1275
1276 static reg_entry *
1277 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1278 {
1279 reg_entry *new;
1280 const char *name;
1281
1282 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1283 {
1284 if (new->builtin)
1285 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1286 str);
1287
1288 /* Only warn about a redefinition if it's not defined as the
1289 same register. */
1290 else if (new->number != number || new->type != type)
1291 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1292
1293 return NULL;
1294 }
1295
1296 name = xstrdup (str);
1297 new = XNEW (reg_entry);
1298
1299 new->name = name;
1300 new->number = number;
1301 new->type = type;
1302 new->builtin = false;
1303
1304 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1305
1306 return new;
1307 }
1308
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  /* Step over " .req " to the existing register's name.  */
  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only create the uppercase variant if it actually differs from
	 the alias as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lowercase variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias made its own copies of the name, so the working
     buffer is always ours to free.  */
  free (nbuf);
  return true;
}
1388
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  If .req does start
   a line the directive is malformed, so just diagnose it.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1396
1397 /* The .unreq directive deletes an alias which was previously defined
1398 by .req. For example:
1399
1400 my_alias .req r11
1401 .unreq my_alias */
1402
1403 static void
1404 s_unreq (int a ATTRIBUTE_UNUSED)
1405 {
1406 char *name;
1407 char saved_char;
1408
1409 name = input_line_pointer;
1410 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1411 saved_char = *input_line_pointer;
1412 *input_line_pointer = 0;
1413
1414 if (!*name)
1415 as_bad (_("invalid syntax for .unreq directive"));
1416 else
1417 {
1418 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1419
1420 if (!reg)
1421 as_bad (_("unknown register alias '%s'"), name);
1422 else if (reg->builtin)
1423 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1424 name);
1425 else
1426 {
1427 char *p;
1428 char *nbuf;
1429
1430 str_hash_delete (aarch64_reg_hsh, name);
1431 free ((char *) reg->name);
1432 free (reg);
1433
1434 /* Also locate the all upper case and all lower case versions.
1435 Do not complain if we cannot find one or the other as it
1436 was probably deleted above. */
1437
1438 nbuf = strdup (name);
1439 for (p = nbuf; *p; p++)
1440 *p = TOUPPER (*p);
1441 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1442 if (reg)
1443 {
1444 str_hash_delete (aarch64_reg_hsh, nbuf);
1445 free ((char *) reg->name);
1446 free (reg);
1447 }
1448
1449 for (p = nbuf; *p; p++)
1450 *p = TOLOWER (*p);
1451 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1452 if (reg)
1453 {
1454 str_hash_delete (aarch64_reg_hsh, nbuf);
1455 free ((char *) reg->name);
1456 free (reg);
1457 }
1458
1459 free (nbuf);
1460 }
1461 }
1462
1463 *input_line_pointer = saved_char;
1464 demand_empty_rest_of_line ();
1465 }
1466
1467 /* Directives: Instruction set selection. */
1468
1469 #if defined OBJ_ELF || defined OBJ_COFF
1470 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1471 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1472 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1473 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1474
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG, and record it in the frag's first_map /
   last_map bookkeeping.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the mapping symbol name: $d marks data, $x marks A64
     instructions.  Both are emitted untyped (see the note above).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* The old symbol at offset 0 is superseded; drop it from the
	     global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two mapping symbols at the same offset: keep only the new one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1530
1531 /* We must sometimes convert a region marked as code to data during
1532 code alignment, if an odd number of bytes have to be padded. The
1533 code mapping symbol is pushed to an aligned address. */
1534
1535 static void
1536 insert_data_mapping_symbol (enum mstate state,
1537 valueT value, fragS * frag, offsetT bytes)
1538 {
1539 /* If there was already a mapping symbol, remove it. */
1540 if (frag->tc_frag_data.last_map != NULL
1541 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1542 frag->fr_address + value)
1543 {
1544 symbolS *symp = frag->tc_frag_data.last_map;
1545
1546 if (value == 0)
1547 {
1548 know (frag->tc_frag_data.first_map == symp);
1549 frag->tc_frag_data.first_map = NULL;
1550 }
1551 frag->tc_frag_data.last_map = NULL;
1552 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1553 }
1554
1555 make_mapping_symbol (MAP_DATA, value, frag);
1556 make_mapping_symbol (state, value + bytes, frag);
1557 }
1558
1559 static void mapping_state_2 (enum mstate state, int max_chars);
1560
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Retroactively mark everything emitted so far in this
	   section as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Record the new state and emit the STATE mapping symbol itself.  */
  mapping_state_2 (state, 0);
}
1602
1603 /* Same as mapping_state, but MAX_CHARS bytes have already been
1604 allocated. Put the mapping symbol that far back. */
1605
1606 static void
1607 mapping_state_2 (enum mstate state, int max_chars)
1608 {
1609 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1610
1611 if (!SEG_NORMAL (now_seg))
1612 return;
1613
1614 if (mapstate == state)
1615 /* The mapping symbol has already been emitted.
1616 There is nothing else to do. */
1617 return;
1618
1619 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1620 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1621 }
1622 #else
1623 #define mapping_state(x) /* nothing */
1624 #define mapping_state_2(x, y) /* nothing */
1625 #endif
1626
1627 /* Directives: sectioning and alignment. */
1628
/* Implement the .bss directive: switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* Whatever follows in BSS is data, never instructions.  */
  mapping_state (MAP_DATA);
}
1638
/* Implement the .even directive: align the output location to a
   2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);	/* Power-of-two alignment: 2^1 = 2 bytes.  */

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1650
1651 /* Directives: Literal pools. */
1652
1653 static literal_pool *
1654 find_literal_pool (int size)
1655 {
1656 literal_pool *pool;
1657
1658 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1659 {
1660 if (pool->section == now_seg
1661 && pool->sub_section == now_subseg && pool->size == size)
1662 break;
1663 }
1664
1665 return pool;
1666 }
1667
1668 static literal_pool *
1669 find_or_make_literal_pool (int size)
1670 {
1671 /* Next literal pool ID number. */
1672 static unsigned int latest_pool_num = 1;
1673 literal_pool *pool;
1674
1675 pool = find_literal_pool (size);
1676
1677 if (pool == NULL)
1678 {
1679 /* Create a new pool. */
1680 pool = XNEW (literal_pool);
1681 if (!pool)
1682 return NULL;
1683
1684 /* Currently we always put the literal pool in the current text
1685 section. If we were generating "small" model code where we
1686 knew that all code and initialised data was within 1MB then
1687 we could output literals to mergeable, read-only data
1688 sections. */
1689
1690 pool->next_free_entry = 0;
1691 pool->section = now_seg;
1692 pool->sub_section = now_subseg;
1693 pool->size = size;
1694 pool->next = list_of_pools;
1695 pool->symbol = NULL;
1696
1697 /* Add it to the list. */
1698 list_of_pools = pool;
1699 }
1700
1701 /* New pools, and emptied pools, will have a NULL symbol. */
1702 if (pool->symbol == NULL)
1703 {
1704 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1705 &zero_address_frag, 0);
1706 pool->id = latest_pool_num++;
1707 }
1708
1709 /* Done. */
1710 return pool;
1711 }
1712
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten in place into a reference to the pool's symbol plus the
   entry's byte offset.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Identical constants share a single pool slot.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* So do identical symbolic expressions.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as pool_symbol + byte offset of the entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1772
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Give the pre-created SYMBOLP its name, segment, value and frag, and
   append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns stable
     storage for it.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Run the object-format and target hooks for new symbols.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1823
1824
/* Implement the .ltorg/.pool directives: emit every non-empty literal
   pool (4-, 8- and 16-byte entry sizes) for the current (sub)section
   at the present location, then mark the pools as emptied.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk the three possible pool granularities: 1<<2 .. 1<<4 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* The pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 control character in the name keeps it from clashing
	 with any symbol a user could write.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's (so far location-less) symbol its final
	 address, so earlier literal references resolve here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  A NULL symbol makes
	 find_or_make_literal_pool allocate a fresh one next time.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1883
1884 #ifdef OBJ_ELF
1885 /* Forward declarations for functions below, in the MD interface
1886 section. */
1887 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1888 static struct reloc_table_entry * find_reloc_table_entry (char **);
1889
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-sized data values.  A
   relocation suffix (":suffix:") is recognized but currently rejected
   with a diagnostic.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* What follows is data; update the mapping state.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic value may carry an optional "#:reloc:" prefix.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		/* See the N.B. above: suffixes parse but are not yet
		   supported here.  */
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1945
1946 /* Mark symbol that it follows a variant PCS convention. */
1947
1948 static void
1949 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1950 {
1951 char *name;
1952 char c;
1953 symbolS *sym;
1954 asymbol *bfdsym;
1955 elf_symbol_type *elfsym;
1956
1957 c = get_symbol_name (&name);
1958 if (!*name)
1959 as_bad (_("Missing symbol name in directive"));
1960 sym = symbol_find_or_make (name);
1961 restore_line_pointer (c);
1962 demand_empty_rest_of_line ();
1963 bfdsym = symbol_get_bfdsym (sym);
1964 elfsym = elf_symbol_from (bfdsym);
1965 gas_assert (elfsym);
1966 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1967 }
1968 #endif /* OBJ_ELF */
1969
/* Output a 32-bit word, but mark as an instruction.  Implements the
   .inst directive: a comma-separated list of constant 32-bit values
   emitted as instructions (aligned, mapped as code, and recorded for
   DWARF line info).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Number of words emitted so far.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Pre-swap on big-endian targets so the word is emitted in
	 little-endian byte order, as A64 instructions always are.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2026
/* Implement the .cfi_b_key_frame directive: record in the current CFI
   FDE that return addresses in this frame are signed with the
   B pointer-authentication key.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2034
2035 #ifdef OBJ_ELF
2036 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2037
2038 static void
2039 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2040 {
2041 expressionS exp;
2042
2043 expression (&exp);
2044 frag_grow (4);
2045 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2046 BFD_RELOC_AARCH64_TLSDESC_ADD);
2047
2048 demand_empty_rest_of_line ();
2049 }
2050
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  /* A zero-size fix at the current location: it annotates the BLR
     rather than patching any bytes itself.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2070
2071 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2072
2073 static void
2074 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2075 {
2076 expressionS exp;
2077
2078 expression (&exp);
2079 frag_grow (4);
2080 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2081 BFD_RELOC_AARCH64_TLSDESC_LDR);
2082
2083 demand_empty_rest_of_line ();
2084 }
2085 #endif /* OBJ_ELF */
2086
2087 static void s_aarch64_arch (int);
2088 static void s_aarch64_cpu (int);
2089 static void s_aarch64_arch_extension (int);
2090
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is a synonym for .ltorg: flush pending literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target selection and extension directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the argument is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* Half-precision and brain floating-point constants.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2124 \f
2125
2126 /* Check whether STR points to a register name followed by a comma or the
2127 end of line; REG_TYPE indicates which register types are checked
2128 against. Return TRUE if STR is such a register name; otherwise return
2129 FALSE. The function does not intend to produce any diagnostics, but since
2130 the register parser aarch64_reg_parse, which is called by this function,
2131 does produce diagnostics, we call clear_error to clear any diagnostics
2132 that may be generated by aarch64_reg_parse.
2133 Also, the function returns FALSE directly if there is any user error
2134 present at the function entry. This prevents the existing diagnostics
2135 state from being spoiled.
2136 The function currently serves parse_constant_immediate and
2137 parse_big_immediate only. */
2138 static bool
2139 reg_name_p (char *str, aarch64_reg_type reg_type)
2140 {
2141 int reg;
2142
2143 /* Prevent the diagnostics state from being spoiled. */
2144 if (error_p ())
2145 return false;
2146
2147 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2148
2149 /* Clear the parsing error that may be set by the reg parser. */
2150 clear_error ();
2151
2152 if (reg == PARSE_FAIL)
2153 return false;
2154
2155 skip_whitespace (str);
2156 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2157 return true;
2158
2159 return false;
2160 }
2161
2162 /* Parser functions used exclusively in instruction operands. */
2163
2164 /* Parse an immediate expression which may not be constant.
2165
2166 To prevent the expression parser from pushing a register name
2167 into the symbol table as an undefined symbol, firstly a check is
2168 done to find out whether STR is a register of type REG_TYPE followed
2169 by a comma or the end of line. Return FALSE if STR is such a string. */
2170
2171 static bool
2172 parse_immediate_expression (char **str, expressionS *exp,
2173 aarch64_reg_type reg_type)
2174 {
2175 if (reg_name_p (*str, reg_type))
2176 {
2177 set_recoverable_error (_("immediate operand required"));
2178 return false;
2179 }
2180
2181 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2182
2183 if (exp->X_op == O_absent)
2184 {
2185 set_fatal_syntax_error (_("missing immediate expression"));
2186 return false;
2187 }
2188
2189 return true;
2190 }
2191
2192 /* Constant immediate-value read function for use in insn parsing.
2193 STR points to the beginning of the immediate (with the optional
2194 leading #); *VAL receives the value. REG_TYPE says which register
2195 names should be treated as registers rather than as symbolic immediates.
2196
2197 Return TRUE on success; otherwise return FALSE. */
2198
2199 static bool
2200 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2201 {
2202 expressionS exp;
2203
2204 if (! parse_immediate_expression (str, &exp, reg_type))
2205 return false;
2206
2207 if (exp.X_op != O_constant)
2208 {
2209 set_syntax_error (_("constant expression required"));
2210 return false;
2211 }
2212
2213 *val = exp.X_add_number;
2214 return true;
2215 }
2216
2217 static uint32_t
2218 encode_imm_float_bits (uint32_t imm)
2219 {
2220 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2221 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2222 }
2223
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* An encodable value has the single-precision bit pattern

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~e.  */

  /* The 19 low-order fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must all equal the complement of bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) == 0 ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
2256
2257 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2258 as an IEEE float without any loss of precision. Store the value in
2259 *FPWORD if so. */
2260
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t top = (uint32_t) (imm >> 32);
  uint32_t bottom = (uint32_t) imm;
  uint32_t want;

  /* The 29 mantissa bits that a float cannot hold must all be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Expected value of the E~~~ nibble, derived from the E bit.  */
  want = ((top >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;

  /* The three '~' bits must be the inverse of E.  */
  if ((top & 0x78000000) != want)
    return false;

  /* Reject Eeee_eeee == 1111_1111, which would overflow the float's
     exponent range.  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (top & 0xc0000000)		 /* 1 n bit and 1 E bit.  */
	    | ((top << 3) & 0x3ffffff8)	 /* 7 e and 20 s bits.  */
	    | (bottom >> 29);		 /* 3 S bits.  */
  return true;
}
2304
2305 /* Return true if we should treat OPERAND as a double-precision
2306 floating-point operand rather than a single-precision one. */
2307 static bool
2308 double_precision_operand_p (const aarch64_opnd_info *operand)
2309 {
2310 /* Check for unsuffixed SVE registers, which are allowed
2311 for LDR and STR but not in instructions that require an
2312 immediate. We get better error messages if we arbitrarily
2313 pick one size, parse the immediate normally, and then
2314 report the match failure in the normal way. */
2315 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2316 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2317 }
2318
2319 /* Parse a floating-point immediate. Return TRUE on success and return the
2320 value in *IMMED in the format of IEEE754 single-precision encoding.
2321 *CCP points to the start of the string; DP_P is TRUE when the immediate
2322 is expected to be in double-precision (N.B. this only matters when
2323 hexadecimal representation is involved). REG_TYPE says which register
2324 names should be treated as registers rather than as symbolic immediates.
2325
2326 This routine accepts any IEEE float; it is up to the callers to reject
2327 invalid ones. */
2328
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit pattern to a float; fail if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name cannot be a floating-point immediate;
	 flag a recoverable error so other parses may be tried.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let atof_ieee produce the single-precision
	 littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).
	 Pack the littlenums most-significant first, matching the
	 left shifts below.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2394
2395 /* Less-generic immediate-value read function with the possibility of loading
2396 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2397 instructions.
2398
2399 To prevent the expression parser from pushing a register name into the
2400 symbol table as an undefined symbol, a check is firstly done to find
2401 out whether STR is a register of type REG_TYPE followed by a comma or
2402 the end of line. Return FALSE if STR is such a register. */
2403
2404 static bool
2405 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2406 {
2407 char *ptr = *str;
2408
2409 if (reg_name_p (ptr, reg_type))
2410 {
2411 set_syntax_error (_("immediate operand required"));
2412 return false;
2413 }
2414
2415 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2416
2417 if (inst.reloc.exp.X_op == O_constant)
2418 *imm = inst.reloc.exp.X_add_number;
2419
2420 *str = ptr;
2421
2422 return true;
2423 }
2424
2425 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2426 if NEED_LIBOPCODES is non-zero, the fixup will need
2427 assistance from the libopcodes. */
2428
2429 static inline void
2430 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2431 const aarch64_opnd_info *operand,
2432 int need_libopcodes_p)
2433 {
2434 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2435 reloc->opnd = operand->type;
2436 if (need_libopcodes_p)
2437 reloc->need_libopcodes_p = 1;
2438 };
2439
2440 /* Return TRUE if the instruction needs to be fixed up later internally by
2441 the GAS; otherwise return FALSE. */
2442
2443 static inline bool
2444 aarch64_gas_internal_fixup_p (void)
2445 {
2446 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2447 }
2448
2449 /* Assign the immediate value to the relevant field in *OPERAND if
2450 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2451 needs an internal fixup in a later stage.
2452 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2453 IMM.VALUE that may get assigned with the constant. */
2454 static inline void
2455 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2456 aarch64_opnd_info *operand,
2457 int addr_off_p,
2458 int need_libopcodes_p,
2459 int skip_p)
2460 {
2461 if (reloc->exp.X_op == O_constant)
2462 {
2463 if (addr_off_p)
2464 operand->addr.offset.imm = reloc->exp.X_add_number;
2465 else
2466 operand->imm.value = reloc->exp.X_add_number;
2467 reloc->type = BFD_RELOC_UNUSED;
2468 }
2469 else
2470 {
2471 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2472 /* Tell libopcodes to ignore this operand or not. This is helpful
2473 when one of the operands needs to be fixed up later but we need
2474 libopcodes to check the other operands. */
2475 operand->skip = skip_p;
2476 }
2477 }
2478
2479 /* Relocation modifiers. Each entry in the table contains the textual
2480 name for the relocation which may be placed before a symbol used as
2481 a load/store offset, or add immediate. It must be surrounded by a
2482 leading and trailing colon, for example:
2483
2484 ldr x0, [x1, #:rello:varsym]
2485 add x0, x1, #:rello:varsym */
2486
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  bfd_reloc_code_real_type adr_type;	    /* Reloc used with ADR.  */
  bfd_reloc_code_real_type adrp_type;	    /* Reloc used with ADRP.  */
  bfd_reloc_code_real_type movw_type;	    /* Reloc used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	    /* Reloc used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	    /* Reloc used with LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc used with LDR (literal).  */
};
2498
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits of address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15-bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14-bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3027
3028 /* Given the address of a pointer pointing to the textual name of a
3029 relocation as may appear in assembler source, attempt to find its
3030 details in reloc_table. The pointer will be updated to the character
3031 after the trailing colon. On failure, NULL will be returned;
3032 otherwise return the reloc_table_entry. */
3033
3034 static struct reloc_table_entry *
3035 find_reloc_table_entry (char **str)
3036 {
3037 unsigned int i;
3038 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3039 {
3040 int length = strlen (reloc_table[i].name);
3041
3042 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3043 && (*str)[length] == ':')
3044 {
3045 *str += (length + 1);
3046 return &reloc_table[i];
3047 }
3048 }
3049
3050 return NULL;
3051 }
3052
3053 /* Returns 0 if the relocation should never be forced,
3054 1 if the relocation must be forced, and -1 if either
3055 result is OK. */
3056
static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

      /* Everything below is a GOT-, TLS- or page-relative relocation
	 whose final value can only be known at link time.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No firm policy for this type; the caller decides.  */
      return -1;
    }
}
3155
3156 int
3157 aarch64_force_relocation (struct fix *fixp)
3158 {
3159 int res = aarch64_force_reloc (fixp->fx_r_type);
3160
3161 if (res == -1)
3162 return generic_force_reloc (fixp);
3163 return res;
3164 }
3165
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiplier)  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3180
3181 /* Parse a <shift> operator on an AArch64 data processing instruction.
3182 Return TRUE on success; otherwise return FALSE. */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the leading alphabetic token, e.g. "lsl" or "uxtw".  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the token up in the table of known shift/extend operators.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check that the operator found is legal for MODE.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A closing ']' in register-offset mode or a
     MUL VL operator means the amount is absent.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* An absent amount is only valid for extend operators without a
	 '#' prefix (e.g. "uxtw"); note that amount_present stays 0.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3353
3354 /* Parse a <shifter_operand> for a data processing instruction:
3355
3356 #<immediate>
3357 #<immediate>, LSL #imm
3358
3359 Validation of immediate operands is deferred to md_apply_fix.
3360
3361 Return TRUE on success; otherwise return FALSE. */
3362
3363 static bool
3364 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3365 enum parse_shift_mode mode)
3366 {
3367 char *p;
3368
3369 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3370 return false;
3371
3372 p = *str;
3373
3374 /* Accept an immediate expression. */
3375 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3376 REJECT_ABSENT))
3377 return false;
3378
3379 /* Accept optional LSL for arithmetic immediate values. */
3380 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3381 if (! parse_shift (&p, operand, SHIFTED_LSL))
3382 return false;
3383
3384 /* Not accept any shifter for logical immediate values. */
3385 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3386 && parse_shift (&p, operand, mode))
3387 {
3388 set_syntax_error (_("unexpected shift operator"));
3389 return false;
3390 }
3391
3392 *str = p;
3393 return true;
3394 }
3395
3396 /* Parse a <shifter_operand> for a data processing instruction:
3397
3398 <Rm>
3399 <Rm>, <shift>
3400 #<immediate>
3401 #<immediate>, LSL #imm
3402
3403 where <shift> is handled by parse_shift above, and the last two
3404 cases are handled by the function above.
3405
3406 Validation of immediate operands is deferred to md_apply_fix.
3407
3408 Return TRUE on success; otherwise return FALSE. */
3409
3410 static bool
3411 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3412 enum parse_shift_mode mode)
3413 {
3414 const reg_entry *reg;
3415 aarch64_opnd_qualifier_t qualifier;
3416 enum aarch64_operand_class opd_class
3417 = aarch64_get_operand_class (operand->type);
3418
3419 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3420 if (reg)
3421 {
3422 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3423 {
3424 set_syntax_error (_("unexpected register in the immediate operand"));
3425 return false;
3426 }
3427
3428 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3429 {
3430 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3431 return false;
3432 }
3433
3434 operand->reg.regno = reg->number;
3435 operand->qualifier = qualifier;
3436
3437 /* Accept optional shift operation on register. */
3438 if (! skip_past_comma (str))
3439 return true;
3440
3441 if (! parse_shift (str, operand, mode))
3442 return false;
3443
3444 return true;
3445 }
3446 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3447 {
3448 set_syntax_error
3449 (_("integer register expected in the extended/shifted operand "
3450 "register"));
3451 return false;
3452 }
3453
3454 /* We have a shifted immediate variable. */
3455 return parse_shifter_operand_imm (str, operand, mode);
3456 }
3457
3458 /* Return TRUE on success; return FALSE otherwise. */
3459
static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" (two chars) or ":" (one char) before the
	 modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* add_type == 0 marks modifiers with no ADD-class relocation.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.
	 The saved pointer P restores the position from before the
	 expression was consumed.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3519
3520 /* Parse all forms of an address expression. Information is written
3521 to *OPERAND and/or inst.reloc.
3522
3523 The A64 instruction set has the following addressing modes:
3524
3525 Offset
3526 [base] // in SIMD ld/st structure
3527 [base{,#0}] // in ld/st exclusive
3528 [base{,#imm}]
3529 [base,Xm{,LSL #imm}]
3530 [base,Xm,SXTX {#imm}]
3531 [base,Wm,(S|U)XTW {#imm}]
3532 Pre-indexed
3533 [base]! // in ldraa/ldrab exclusive
3534 [base,#imm]!
3535 Post-indexed
3536 [base],#imm
3537 [base],Xm // in SIMD ld/st structure
3538 PC-relative (literal)
3539 label
3540 SVE:
3541 [base,#imm,MUL VL]
3542 [base,Zm.D{,LSL #imm}]
3543 [base,Zm.S,(S|U)XTW {#imm}]
3544 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3545 [Zn.S,#imm]
3546 [Zn.D,#imm]
3547 [Zn.S{, Xm}]
3548 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3549 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3550 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3551
3552 (As a convenience, the notation "=immediate" is permitted in conjunction
3553 with the pc-relative literal load instructions to automatically place an
3554 immediate value or symbolic address in a nearby literal pool and generate
3555 a hidden label which references it.)
3556
3557 Upon a successful parsing, the address structure in *OPERAND will be
3558 filled in the following way:
3559
3560 .base_regno = <base>
3561 .offset.is_reg // 1 if the offset is a register
3562 .offset.imm = <imm>
3563 .offset.regno = <Rm>
3564
3565 For different addressing modes defined in the A64 ISA:
3566
3567 Offset
3568 .pcrel=0; .preind=1; .postind=0; .writeback=0
3569 Pre-indexed
3570 .pcrel=0; .preind=1; .postind=0; .writeback=1
3571 Post-indexed
3572 .pcrel=0; .preind=0; .postind=1; .writeback=1
3573 PC-relative (literal)
3574 .pcrel=1; .preind=1; .postind=0; .writeback=0
3575
3576 The shift/extension information, if any, will be stored in .shifter.
3577 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3578 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3579 corresponding register.
3580
3581 BASE_TYPE says which types of base register should be accepted and
3582 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3583 is the type of shifter that is allowed for immediate offsets,
3584 or SHIFTED_NONE if none.
3585
3586 In all other respects, it is the caller's responsibility to check
3587 for addressing modes not supported by the instruction, and to set
3588 inst.reloc.type. */
3589
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* EXP aliases the instruction's relocation expression; everything
     parsed below that is not a register lands there.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the relocation variant matching the operand kind.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  /* A zero type means this modifier has no such variant.  */
	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse and validate the base register.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit (or matching-size)
		 offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* SVE_ADDR_ZX ([Zn.S, Xm]) is the one sanctioned
		 mixed-size combination.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* Extending shifters (SXTW/UXTW) take a 32-bit offset.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset, optionally followed by a
		 shifter when IMM_SHIFT_MODE allows one (e.g. MUL VL).  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3888
3889 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3890 on success. */
3891 static bool
3892 parse_address (char **str, aarch64_opnd_info *operand)
3893 {
3894 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3895 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3896 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3897 }
3898
3899 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3900 The arguments have the same meaning as for parse_address_main.
3901 Return TRUE on success. */
3902 static bool
3903 parse_sve_address (char **str, aarch64_opnd_info *operand,
3904 aarch64_opnd_qualifier_t *base_qualifier,
3905 aarch64_opnd_qualifier_t *offset_qualifier)
3906 {
3907 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3908 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3909 SHIFTED_MUL_VL);
3910 }
3911
3912 /* Parse a register X0-X30. The register must be 64-bit and register 31
3913 is unallocated. */
3914 static bool
3915 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3916 {
3917 const reg_entry *reg = parse_reg (str);
3918 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3919 {
3920 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3921 return false;
3922 }
3923 operand->reg.regno = reg->number;
3924 operand->qualifier = AARCH64_OPND_QLF_X;
3925 return true;
3926 }
3927
3928 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3929 Return TRUE on success; otherwise return FALSE. */
3930 static bool
3931 parse_half (char **str, int *internal_fixup_p)
3932 {
3933 char *p = *str;
3934
3935 skip_past_char (&p, '#');
3936
3937 gas_assert (internal_fixup_p);
3938 *internal_fixup_p = 0;
3939
3940 if (*p == ':')
3941 {
3942 struct reloc_table_entry *entry;
3943
3944 /* Try to parse a relocation. Anything else is an error. */
3945 ++p;
3946
3947 if (!(entry = find_reloc_table_entry (&p)))
3948 {
3949 set_syntax_error (_("unknown relocation modifier"));
3950 return false;
3951 }
3952
3953 if (entry->movw_type == 0)
3954 {
3955 set_syntax_error
3956 (_("this relocation modifier is not allowed on this instruction"));
3957 return false;
3958 }
3959
3960 inst.reloc.type = entry->movw_type;
3961 }
3962 else
3963 *internal_fixup_p = 1;
3964
3965 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
3966 return false;
3967
3968 *str = p;
3969 return true;
3970 }
3971
3972 /* Parse an operand for an ADRP instruction:
3973 ADRP <Xd>, <label>
3974 Return TRUE on success; otherwise return FALSE. */
3975
3976 static bool
3977 parse_adrp (char **str)
3978 {
3979 char *p;
3980
3981 p = *str;
3982 if (*p == ':')
3983 {
3984 struct reloc_table_entry *entry;
3985
3986 /* Try to parse a relocation. Anything else is an error. */
3987 ++p;
3988 if (!(entry = find_reloc_table_entry (&p)))
3989 {
3990 set_syntax_error (_("unknown relocation modifier"));
3991 return false;
3992 }
3993
3994 if (entry->adrp_type == 0)
3995 {
3996 set_syntax_error
3997 (_("this relocation modifier is not allowed on this instruction"));
3998 return false;
3999 }
4000
4001 inst.reloc.type = entry->adrp_type;
4002 }
4003 else
4004 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4005
4006 inst.reloc.pc_rel = 1;
4007 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4008 return false;
4009 *str = p;
4010 return true;
4011 }
4012
4013 /* Miscellaneous. */
4014
4015 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4016 of SIZE tokens in which index I gives the token for field value I,
4017 or is null if field value I is invalid. REG_TYPE says which register
4018 names should be treated as registers rather than as symbolic immediates.
4019
4020 Return true on success, moving *STR past the operand and storing the
4021 field value in *VAL. */
4022
4023 static int
4024 parse_enum_string (char **str, int64_t *val, const char *const *array,
4025 size_t size, aarch64_reg_type reg_type)
4026 {
4027 expressionS exp;
4028 char *p, *q;
4029 size_t i;
4030
4031 /* Match C-like tokens. */
4032 p = q = *str;
4033 while (ISALNUM (*q))
4034 q++;
4035
4036 for (i = 0; i < size; ++i)
4037 if (array[i]
4038 && strncasecmp (array[i], p, q - p) == 0
4039 && array[i][q - p] == 0)
4040 {
4041 *val = i;
4042 *str = q;
4043 return true;
4044 }
4045
4046 if (!parse_immediate_expression (&p, &exp, reg_type))
4047 return false;
4048
4049 if (exp.X_op == O_constant
4050 && (uint64_t) exp.X_add_number < size)
4051 {
4052 *val = exp.X_add_number;
4053 *str = p;
4054 return true;
4055 }
4056
4057 /* Use the default error for this operand. */
4058 return false;
4059 }
4060
4061 /* Parse an option for a preload instruction. Returns the encoding for the
4062 option, or PARSE_FAIL. */
4063
4064 static int
4065 parse_pldop (char **str)
4066 {
4067 char *p, *q;
4068 const struct aarch64_name_value_pair *o;
4069
4070 p = q = *str;
4071 while (ISALNUM (*q))
4072 q++;
4073
4074 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4075 if (!o)
4076 return PARSE_FAIL;
4077
4078 *str = q;
4079 return o->value;
4080 }
4081
4082 /* Parse an option for a barrier instruction. Returns the encoding for the
4083 option, or PARSE_FAIL. */
4084
4085 static int
4086 parse_barrier (char **str)
4087 {
4088 char *p, *q;
4089 const struct aarch64_name_value_pair *o;
4090
4091 p = q = *str;
4092 while (ISALPHA (*q))
4093 q++;
4094
4095 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4096 if (!o)
4097 return PARSE_FAIL;
4098
4099 *str = q;
4100 return o->value;
4101 }
4102
/* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option
   record and return 0 if successful.  Otherwise return PARSE_FAIL.  */
4105
4106 static int
4107 parse_barrier_psb (char **str,
4108 const struct aarch64_name_value_pair ** hint_opt)
4109 {
4110 char *p, *q;
4111 const struct aarch64_name_value_pair *o;
4112
4113 p = q = *str;
4114 while (ISALPHA (*q))
4115 q++;
4116
4117 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4118 if (!o)
4119 {
4120 set_fatal_syntax_error
4121 ( _("unknown or missing option to PSB/TSB"));
4122 return PARSE_FAIL;
4123 }
4124
4125 if (o->value != 0x11)
4126 {
4127 /* PSB only accepts option name 'CSYNC'. */
4128 set_syntax_error
4129 (_("the specified option is not accepted for PSB/TSB"));
4130 return PARSE_FAIL;
4131 }
4132
4133 *str = q;
4134 *hint_opt = o;
4135 return 0;
4136 }
4137
/* Parse an operand for BTI.  Set *HINT_OPT to the hint-option record
   and return 0 if successful.  Otherwise return PARSE_FAIL.  */
4140
4141 static int
4142 parse_bti_operand (char **str,
4143 const struct aarch64_name_value_pair ** hint_opt)
4144 {
4145 char *p, *q;
4146 const struct aarch64_name_value_pair *o;
4147
4148 p = q = *str;
4149 while (ISALPHA (*q))
4150 q++;
4151
4152 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4153 if (!o)
4154 {
4155 set_fatal_syntax_error
4156 ( _("unknown option to BTI"));
4157 return PARSE_FAIL;
4158 }
4159
4160 switch (o->value)
4161 {
4162 /* Valid BTI operands. */
4163 case HINT_OPD_C:
4164 case HINT_OPD_J:
4165 case HINT_OPD_JC:
4166 break;
4167
4168 default:
4169 set_syntax_error
4170 (_("unknown option to BTI"));
4171 return PARSE_FAIL;
4172 }
4173
4174 *str = q;
4175 *hint_opt = o;
4176 return 0;
4177 }
4178
4179 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4180 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4181 on failure. Format:
4182
4183 REG_TYPE.QUALIFIER
4184
4185 Side effect: Update STR with current parse position of success.
4186 */
4187
static const reg_entry *
parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
		     aarch64_opnd_qualifier_t *qualifier)
{
  char *q;

  reg_entry *reg = parse_reg (str);
  if (reg != NULL && reg->type == reg_type)
    {
      /* A '.' must separate the register name from its element size.  */
      if (!skip_past_char (str, '.'))
	{
	  set_syntax_error (_("missing ZA tile element size separator"));
	  return NULL;
	}

      /* Map the single size letter to the scalar qualifier.  */
      q = *str;
      switch (TOLOWER (*q))
	{
	case 'b':
	  *qualifier = AARCH64_OPND_QLF_S_B;
	  break;
	case 'h':
	  *qualifier = AARCH64_OPND_QLF_S_H;
	  break;
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	case 'q':
	  *qualifier = AARCH64_OPND_QLF_S_Q;
	  break;
	default:
	  /* NOTE(review): unlike the missing-'.' path, no syntax error
	     is set here, and *STR has already been advanced past the
	     register name and '.' — callers that retry with a copied
	     pointer (e.g. parse_sme_za_hv_tiles_operand) rely on
	     passing a scratch pointer instead.  */
	  return NULL;
	}
      q++;

      /* Consume the size letter and report success.  */
      *str = q;
      return reg;
    }

  return NULL;
}
4232
4233 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4234 Function return tile QUALIFIER on success.
4235
4236 Tiles are in example format: za[0-9]\.[bhsd]
4237
4238 Function returns <ZAda> register number or PARSE_FAIL.
4239 */
4240 static int
4241 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4242 {
4243 int regno;
4244 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4245
4246 if (reg == NULL)
4247 return PARSE_FAIL;
4248 regno = reg->number;
4249
4250 switch (*qualifier)
4251 {
4252 case AARCH64_OPND_QLF_S_B:
4253 if (regno != 0x00)
4254 {
4255 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4256 return PARSE_FAIL;
4257 }
4258 break;
4259 case AARCH64_OPND_QLF_S_H:
4260 if (regno > 0x01)
4261 {
4262 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4263 return PARSE_FAIL;
4264 }
4265 break;
4266 case AARCH64_OPND_QLF_S_S:
4267 if (regno > 0x03)
4268 {
4269 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4270 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4271 return PARSE_FAIL;
4272 }
4273 break;
4274 case AARCH64_OPND_QLF_S_D:
4275 if (regno > 0x07)
4276 {
4277 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4278 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4279 return PARSE_FAIL;
4280 }
4281 break;
4282 default:
4283 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4284 return PARSE_FAIL;
4285 }
4286
4287 return regno;
4288 }
4289
4290 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4291
4292 #<imm>
4293 <imm>
4294
4295 Function return TRUE if immediate was found, or FALSE.
4296 */
4297 static bool
4298 parse_sme_immediate (char **str, int64_t *imm)
4299 {
4300 int64_t val;
4301 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4302 return false;
4303
4304 *imm = val;
4305 return true;
4306 }
4307
4308 /* Parse index with vector select register and immediate:
4309
4310 [<Wv>, <imm>]
4311 [<Wv>, #<imm>]
4312 where <Wv> is in W12-W15 range and # is optional for immediate.
4313
4314 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4315 is set to true.
4316
4317 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4318 IMM output.
4319 */
4320 static bool
4321 parse_sme_za_hv_tiles_operand_index (char **str,
4322 int *vector_select_register,
4323 int64_t *imm)
4324 {
4325 const reg_entry *reg;
4326
4327 if (!skip_past_char (str, '['))
4328 {
4329 set_syntax_error (_("expected '['"));
4330 return false;
4331 }
4332
4333 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4334 reg = parse_reg (str);
4335 if (reg == NULL || reg->type != REG_TYPE_R_32
4336 || reg->number < 12 || reg->number > 15)
4337 {
4338 set_syntax_error (_("expected vector select register W12-W15"));
4339 return false;
4340 }
4341 *vector_select_register = reg->number;
4342
4343 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4344 {
4345 set_syntax_error (_("expected ','"));
4346 return false;
4347 }
4348
4349 if (!parse_sme_immediate (str, imm))
4350 {
4351 set_syntax_error (_("index offset immediate expected"));
4352 return false;
4353 }
4354
4355 if (!skip_past_char (str, ']'))
4356 {
4357 set_syntax_error (_("expected ']'"));
4358 return false;
4359 }
4360
4361 return true;
4362 }
4363
4364 /* Parse SME ZA horizontal or vertical vector access to tiles.
4365 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4366 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4367 contains <Wv> select register and corresponding optional IMMEDIATE.
4368 In addition QUALIFIER is extracted.
4369
4370 Field format examples:
4371
4372 ZA0<HV>.B[<Wv>, #<imm>]
4373 <ZAn><HV>.H[<Wv>, #<imm>]
4374 <ZAn><HV>.S[<Wv>, #<imm>]
4375 <ZAn><HV>.D[<Wv>, #<imm>]
4376 <ZAn><HV>.Q[<Wv>, #<imm>]
4377
4378 Function returns <ZAda> register number or PARSE_FAIL.
4379 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Probe for a horizontal (ZAnH) then a vertical (ZAnV) tile name.
     Separate scratch pointers QH/QV are used because a failed probe
     may leave its pointer advanced; *STR is only updated from the
     pointer of the probe that succeeded.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* Larger element sizes allow more tiles but fewer index values:
     REGNO_LIMIT is the highest valid tile number and IMM_LIMIT the
     highest valid index offset for the parsed element size.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* Parse the trailing [<Wv>, <imm>] index.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4460
4461
4462 static int
4463 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4464 enum sme_hv_slice *slice_indicator,
4465 int *vector_select_register,
4466 int *imm,
4467 aarch64_opnd_qualifier_t *qualifier)
4468 {
4469 int regno;
4470
4471 if (!skip_past_char (str, '{'))
4472 {
4473 set_syntax_error (_("expected '{'"));
4474 return PARSE_FAIL;
4475 }
4476
4477 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4478 vector_select_register, imm,
4479 qualifier);
4480
4481 if (regno == PARSE_FAIL)
4482 return PARSE_FAIL;
4483
4484 if (!skip_past_char (str, '}'))
4485 {
4486 set_syntax_error (_("expected '}'"));
4487 return PARSE_FAIL;
4488 }
4489
4490 return regno;
4491 }
4492
4493 /* Parse list of up to eight 64-bit element tile names separated by commas in
4494 SME's ZERO instruction:
4495
4496 ZERO { <mask> }
4497
4498 Function returns <mask>:
4499
4500 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4501 */
4502 static int
4503 parse_sme_zero_mask(char **str)
4504 {
4505 char *q;
4506 int mask;
4507 aarch64_opnd_qualifier_t qualifier;
4508
4509 mask = 0x00;
4510 q = *str;
4511 do
4512 {
4513 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
4514 if (reg)
4515 {
4516 int regno = reg->number;
4517 if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
4518 {
4519 /* { ZA0.B } is assembled as all-ones immediate. */
4520 mask = 0xff;
4521 }
4522 else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
4523 mask |= 0x55 << regno;
4524 else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
4525 mask |= 0x11 << regno;
4526 else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
4527 mask |= 0x01 << regno;
4528 else
4529 {
4530 set_syntax_error (_("wrong ZA tile element format"));
4531 return PARSE_FAIL;
4532 }
4533 continue;
4534 }
4535 else if (strncasecmp (q, "za", 2) == 0
4536 && !ISALNUM (q[2]))
4537 {
4538 /* { ZA } is assembled as all-ones immediate. */
4539 mask = 0xff;
4540 q += 2;
4541 continue;
4542 }
4543 else
4544 {
4545 set_syntax_error (_("wrong ZA tile element format"));
4546 return PARSE_FAIL;
4547 }
4548 }
4549 while (skip_past_char (&q, ','));
4550
4551 *str = q;
4552 return mask;
4553 }
4554
4555 /* Wraps in curly braces <mask> operand ZERO instruction:
4556
4557 ZERO { <mask> }
4558
4559 Function returns value of <mask> bit-field.
4560 */
4561 static int
4562 parse_sme_list_of_64bit_tiles (char **str)
4563 {
4564 int regno;
4565
4566 if (!skip_past_char (str, '{'))
4567 {
4568 set_syntax_error (_("expected '{'"));
4569 return PARSE_FAIL;
4570 }
4571
4572 /* Empty <mask> list is an all-zeros immediate. */
4573 if (!skip_past_char (str, '}'))
4574 {
4575 regno = parse_sme_zero_mask (str);
4576 if (regno == PARSE_FAIL)
4577 return PARSE_FAIL;
4578
4579 if (!skip_past_char (str, '}'))
4580 {
4581 set_syntax_error (_("expected '}'"));
4582 return PARSE_FAIL;
4583 }
4584 }
4585 else
4586 regno = 0x00;
4587
4588 return regno;
4589 }
4590
4591 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4592 Operand format:
4593
4594 ZA[<Wv>, <imm>]
4595 ZA[<Wv>, #<imm>]
4596
4597 Function returns <Wv> or PARSE_FAIL.
4598 */
4599 static int
4600 parse_sme_za_array (char **str, int *imm)
4601 {
4602 char *p, *q;
4603 int regno;
4604 int64_t imm_value;
4605
4606 p = q = *str;
4607 while (ISALPHA (*q))
4608 q++;
4609
4610 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4611 {
4612 set_syntax_error (_("expected ZA array"));
4613 return PARSE_FAIL;
4614 }
4615
4616 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4617 return PARSE_FAIL;
4618
4619 if (imm_value < 0 || imm_value > 15)
4620 {
4621 set_syntax_error (_("offset out of range"));
4622 return PARSE_FAIL;
4623 }
4624
4625 *imm = imm_value;
4626 *str = q;
4627 return regno;
4628 }
4629
4630 /* Parse streaming mode operand for SMSTART and SMSTOP.
4631
4632 {SM | ZA}
4633
   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
4635 */
4636 static int
4637 parse_sme_sm_za (char **str)
4638 {
4639 char *p, *q;
4640
4641 p = q = *str;
4642 while (ISALPHA (*q))
4643 q++;
4644
4645 if ((q - p != 2)
4646 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4647 {
4648 set_syntax_error (_("expected SM or ZA operand"));
4649 return PARSE_FAIL;
4650 }
4651
4652 *str = q;
4653 return TOLOWER (p[0]);
4654 }
4655
4656 /* Parse the name of the source scalable predicate register, the index base
4657 register W12-W15 and the element index. Function performs element index
4658 limit checks as well as qualifier type checks.
4659
4660 <Pn>.<T>[<Wv>, <imm>]
4661 <Pn>.<T>[<Wv>, #<imm>]
4662
4663 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4664 <imm> to IMM.
4665 Function returns <Pn>, or PARSE_FAIL.
4666 */
4667 static int
4668 parse_sme_pred_reg_with_index(char **str,
4669 int *index_base_reg,
4670 int *imm,
4671 aarch64_opnd_qualifier_t *qualifier)
4672 {
4673 int regno;
4674 int64_t imm_limit;
4675 int64_t imm_value;
4676 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4677
4678 if (reg == NULL)
4679 return PARSE_FAIL;
4680 regno = reg->number;
4681
4682 switch (*qualifier)
4683 {
4684 case AARCH64_OPND_QLF_S_B:
4685 imm_limit = 15;
4686 break;
4687 case AARCH64_OPND_QLF_S_H:
4688 imm_limit = 7;
4689 break;
4690 case AARCH64_OPND_QLF_S_S:
4691 imm_limit = 3;
4692 break;
4693 case AARCH64_OPND_QLF_S_D:
4694 imm_limit = 1;
4695 break;
4696 default:
4697 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4698 return PARSE_FAIL;
4699 }
4700
4701 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4702 return PARSE_FAIL;
4703
4704 if (imm_value < 0 || imm_value > imm_limit)
4705 {
4706 set_syntax_error (_("element index out of range for given variant"));
4707 return PARSE_FAIL;
4708 }
4709
4710 *imm = imm_value;
4711
4712 return regno;
4713 }
4714
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-NULL, the matched register's flag bits (or 0 for an
   implementation defined register) are stored in *FLAGS.  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name.  The copy
     is truncated to fit BUF; truncation is detected below.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field against its architectural width.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the encoding used for MSR/MRS.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known register: diagnose names the selected processor does not
	 support, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4788
4789 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4790 for the option, or NULL. */
4791
4792 static const aarch64_sys_ins_reg *
4793 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4794 {
4795 char *p, *q;
4796 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4797 const aarch64_sys_ins_reg *o;
4798
4799 p = buf;
4800 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4801 if (p < buf + (sizeof (buf) - 1))
4802 *p++ = TOLOWER (*q);
4803 *p = '\0';
4804
4805 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4806 valid system register. This is enforced by construction of the hash
4807 table. */
4808 if (p - buf != q - *str)
4809 return NULL;
4810
4811 o = str_hash_find (sys_ins_regs, buf);
4812 if (!o)
4813 return NULL;
4814
4815 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4816 o->name, o->value, o->flags, 0))
4817 as_bad (_("selected processor does not support system register "
4818 "name '%s'"), buf);
4819 if (aarch64_sys_reg_deprecated_p (o->flags))
4820 as_warn (_("system register name '%s' is deprecated and may be "
4821 "removed in a future release"), buf);
4822
4823 *str = q;
4824 return o;
4825 }
4826 \f
/* Convenience macros used by the operand parser.  Each one parses one
   piece of an operand from the caller's local STR and, on failure,
   jumps to the caller's `failure' label.  */

/* Require the literal character CHR to be next in the input.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL (its kind into RTYPE).  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register; fill INFO's regno/qualifier.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and check it is in [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
#min " to "#max));						\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the symbolic names in ARRAY into VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR (a parser call); fail if it returns false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4878 \f
/* Encode the 12-bit immediate field of an Add/sub (immediate)
   instruction; the value occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  return value << 10;
}
4885
/* Encode the shift-amount field of an Add/sub (immediate) instruction;
   the count occupies the field starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t count)
{
  return count << 22;
}
4892
4893
/* Encode the 21-bit immediate of an ADR instruction: the low two bits
   (immlo) go to bits [30:29] and the remaining 19 bits (immhi) go to
   bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;
  return (immlo << 29) | (immhi << 5);
}
4901
/* Encode the 16-bit immediate field of a Move wide immediate
   instruction; the value occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  return value << 5;
}
4908
/* Encode the 26-bit offset of an unconditional branch: keep the low
   26 bits, placed at bit 0.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t offset)
{
  return offset & 0x3ffffff;
}
4915
/* Encode the 19-bit offset of a conditional branch or a compare &
   branch: the low 19 bits of the offset, placed at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
4922
/* Encode the 19-bit offset of a load-literal instruction: the low 19
   bits of the offset, placed at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
4929
/* Encode the 14-bit offset of a test & branch instruction: the low 14
   bits of the offset, placed at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t offset)
{
  return (offset & 0x3fff) << 5;
}
4936
/* Encode the 16-bit immediate field of svc/hvc/smc; the value occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t value)
{
  return value << 5;
}
4943
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   op bit (bit 30) of OPCODE.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000u;
}
4950
/* Reencode a MOVZ/MOVN-class opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
4956
/* Reencode a MOVZ/MOVN-class opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
4962
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* These forms are simple enough for the generic fixup machinery
	 to handle directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Anything more complex: wrap the expression in a symbol in the
	 expr_section and fix up against that symbol instead.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
4998 \f
4999 /* Diagnostics on operands errors. */
5000
/* By default, output a verbose error message (e.g. suggested operand
   variants for an operand mismatch).
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
5004
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed
   by enum aarch64_operand_error_kind and must be kept in sync with it;
   a missing entry would misname every following kind and read past the
   end of the array.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5022
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the numeric order of the enum; these asserts
     document (and verify at runtime) the ordering this function
     relies on.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5046
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   N.B. the result points to a static buffer, so it is only valid until
   the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5075
5076 static void
5077 reset_aarch64_instruction (aarch64_instruction *instruction)
5078 {
5079 memset (instruction, '\0', sizeof (aarch64_instruction));
5080 instruction->reloc.type = BFD_RELOC_UNUSED;
5081 }
5082
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, linked into a list.  At most one record
   exists per opcode template tried for the current assembly line.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error was found on.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* A singly-linked list of operand error records, tracked by both its
   head and tail.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5102
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid repeated
   allocation (see init_operand_error_report).  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5114
5115 /* Initialize the data structure that stores the operand mismatch
5116 information on assembling one line of the assembly code. */
5117 static void
5118 init_operand_error_report (void)
5119 {
5120 if (operand_error_report.head != NULL)
5121 {
5122 gas_assert (operand_error_report.tail != NULL);
5123 operand_error_report.tail->next = free_opnd_error_record_nodes;
5124 free_opnd_error_record_nodes = operand_error_report.head;
5125 operand_error_report.head = NULL;
5126 operand_error_report.tail = NULL;
5127 return;
5128 }
5129 gas_assert (operand_error_report.tail == NULL);
5130 }
5131
5132 /* Return TRUE if some operand error has been recorded during the
5133 parsing of the current assembly line using the opcode *OPCODE;
5134 otherwise return FALSE. */
5135 static inline bool
5136 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5137 {
5138 operand_error_record *record = operand_error_report.head;
5139 return record && record->opcode == opcode;
5140 }
5141
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record for OPCODE already exists, it is the head of the list
     (records are inserted at the head).  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Fill in (or overwrite) the record's detail with the new error.  */
  record->detail = new_record->detail;
}
5193
5194 static inline void
5195 record_operand_error_info (const aarch64_opcode *opcode,
5196 aarch64_operand_error *error_info)
5197 {
5198 operand_error_record record;
5199 record.opcode = opcode;
5200 record.detail = *error_info;
5201 add_operand_error_record (&record);
5202 }
5203
5204 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5205 error message *ERROR, for operand IDX (count from 0). */
5206
5207 static void
5208 record_operand_error (const aarch64_opcode *opcode, int idx,
5209 enum aarch64_operand_error_kind kind,
5210 const char* error)
5211 {
5212 aarch64_operand_error info;
5213 memset(&info, 0, sizeof (info));
5214 info.index = idx;
5215 info.kind = kind;
5216 info.error = error;
5217 info.non_fatal = false;
5218 record_operand_error_info (opcode, &info);
5219 }
5220
5221 static void
5222 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5223 enum aarch64_operand_error_kind kind,
5224 const char* error, const int *extra_data)
5225 {
5226 aarch64_operand_error info;
5227 info.index = idx;
5228 info.kind = kind;
5229 info.error = error;
5230 info.data[0].i = extra_data[0];
5231 info.data[1].i = extra_data[1];
5232 info.data[2].i = extra_data[2];
5233 info.non_fatal = false;
5234 record_operand_error_info (opcode, &info);
5235 }
5236
5237 static void
5238 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5239 const char* error, int lower_bound,
5240 int upper_bound)
5241 {
5242 int data[3] = {lower_bound, upper_bound, 0};
5243 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5244 error, data);
5245 }
5246
5247 /* Remove the operand error record for *OPCODE. */
5248 static void ATTRIBUTE_UNUSED
5249 remove_operand_error_record (const aarch64_opcode *opcode)
5250 {
5251 if (opcode_has_operand_error_p (opcode))
5252 {
5253 operand_error_record* record = operand_error_report.head;
5254 gas_assert (record != NULL && operand_error_report.tail != NULL);
5255 operand_error_report.head = record->next;
5256 record->next = free_opnd_error_record_nodes;
5257 free_opnd_error_record_nodes = record;
5258 if (operand_error_report.head == NULL)
5259 {
5260 gas_assert (operand_error_report.tail == record);
5261 operand_error_report.tail = NULL;
5262 }
5263 }
5264 }
5265
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers of this pattern agree with
	 the instruction's.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the pattern with the most matches; ties are resolved in
	 favour of the earlier pattern.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5315
5316 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5317 corresponding operands in *INSTR. */
5318
5319 static inline void
5320 assign_qualifier_sequence (aarch64_inst *instr,
5321 const aarch64_opnd_qualifier_t *qualifiers)
5322 {
5323 int i = 0;
5324 int num_opnds = aarch64_num_of_operands (instr->opcode);
5325 gas_assert (num_opnds);
5326 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5327 instr->operands[i].qualifier = *qualifiers;
5328 }
5329
/* Callback used by aarch64_print_operand to apply STYLE to the
   disassembler output created from FMT and ARGS.  The STYLER object holds
   any required state.  Must return a pointer to a string (created from FMT
   and ARGS) that will continue to be valid until the complete disassembled
   instruction has been printed.

   We don't currently add any styling to the output of the disassembler as
   used within assembler error messages, and so STYLE is ignored here.  A
   new string is allocated on the obstack help within STYLER and returned
   to the caller.  */

static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  A copy of ARGS is consumed here so
     that ARGS itself is still usable for the real formatting below.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  The string
     lives as long as the obstack does, satisfying the lifetime
     requirement described above.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5364
/* Print operands for the diagnosis purpose.  The textual form of each
   operand of OPCODE/OPNDS is appended to BUF, comma-separated.  BUF must
   be large enough for the result; no bounds checking is done here.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styling callback state: strings are allocated on CONTENT and freed
     in one go at the end.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5416
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }

  fprintf (stderr, _("Info: "));

  /* Forward the variable arguments to vfprintf.  */
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);

  (void) putc ('\n', stderr);
}
5440
/* Output one operand error record.  Non-fatal errors are issued as
   warnings, everything else as errors.  STR is the offending assembly
   line, quoted in the diagnostics.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 when the error is not tied to a specific operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* Equal bounds mean exactly one value is legal; word the message
	 accordingly.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5636
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* Only let a record that survives the non-fatal filter raise the
	 severity, so that a fatal record which will never be printed
	 cannot win the selection.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  /* When filtering for non-fatal errors it is possible that nothing
     matched, in which case KIND legitimately stays AARCH64_OPDE_NIL.  */
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5734 \f
/* Store the 32-bit instruction INSN into BUF in little-endian byte
   order, independently of the host's endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  unsigned int i;

  for (i = 0; i < 4; i++)
    out[i] = (insn >> (i * 8)) & 0xff;
}
5745
/* Read a 32-bit instruction from BUF, which holds it in little-endian
   byte order, and return it as a host-order value.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *in = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in from the most significant end downwards.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | in[i];
  return insn;
}
5755
/* Emit the encoded instruction held in inst.base.value (INSN_SIZE bytes,
   always little-endian) into the current frag, creating a fix-up for it
   when a relocation has been requested.  NEW_INST, if non-NULL, is
   attached to that fix-up for later processing.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Flag that instruction data has been emitted into this frag.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Stash the operand and relocation flags on the fix-up so the
	     back-end can resolve this internal fixup itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5789
/* Link together opcodes of the same name.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One opcode entry with this mnemonic.  */
  struct templates *next;	/* Next entry sharing the same mnemonic,
				   or NULL at the end of the chain.  */
};

typedef struct templates templates;
5799
5800 static templates *
5801 lookup_mnemonic (const char *start, int len)
5802 {
5803 templates *templ = NULL;
5804
5805 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5806 return templ;
5807 }
5808
5809 /* Subroutine of md_assemble, responsible for looking up the primary
5810 opcode from the mnemonic the user wrote. BASE points to the beginning
5811 of the mnemonic, DOT points to the first '.' within the mnemonic
5812 (if any) and END points to the end of the mnemonic. */
5813
5814 static templates *
5815 opcode_lookup (char *base, char *dot, char *end)
5816 {
5817 const aarch64_cond *cond;
5818 char condname[16];
5819 int len;
5820
5821 if (dot == end)
5822 return 0;
5823
5824 inst.cond = COND_ALWAYS;
5825
5826 /* Handle a possible condition. */
5827 if (dot)
5828 {
5829 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5830 if (!cond)
5831 return 0;
5832 inst.cond = cond->value;
5833 len = dot - base;
5834 }
5835 else
5836 len = end - base;
5837
5838 if (inst.cond == COND_ALWAYS)
5839 {
5840 /* Look for unaffixed mnemonic. */
5841 return lookup_mnemonic (base, len);
5842 }
5843 else if (len <= 13)
5844 {
5845 /* append ".c" to mnemonic if conditional */
5846 memcpy (condname, base, len);
5847 memcpy (condname + len, ".c", 2);
5848 base = condname;
5849 len += 2;
5850 return lookup_mnemonic (base, len);
5851 }
5852
5853 return NULL;
5854 }
5855
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording a syntax error) when the arrangement is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for the narrowest vector arrangement of each element
     type; indexed by vector_el_type, like ele_size.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers (/z and /m) are handled up front.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  NOTE: this relies on the S_B..S_Q
	 qualifiers being declared in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Total size must be that of a 32-, 64- or
	 128-bit vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5930
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The opcode table supplies a single default value for the omitted
     operand; where it is stored depends on the operand type below.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default is the register number; no
       lane index is set here.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* A scaled SVE pattern additionally defaults to "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier and BTI operands: the default indexes the corresponding
       option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6029
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Operand 0's qualifier distinguishes 32-bit (W) from 64-bit (X)
     destination registers.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The signed (_S), PC-relative (_PREL) and several TLS relocation
     variants listed below are not permitted on MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to the implicit LSL amount of the
     move wide instruction.  G2/G3 only make sense for 64-bit registers.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6131
/* A primitive log2 calculator for power-of-two operand sizes.  SIZE is
   expected to be 1, 2, 4, 8 or 16; anything else is a caller bug caught
   by the assertions, after which -1 (as unsigned) is returned.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[n - 1] is log2 (n) for the powers of two, and (unsigned char)-1
     for every non-power-of-two in between.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as values above 16; the previous
     "size > 16" test alone let 0 slip through and index ls[-1],
     an out-of-bounds read (undefined behaviour).  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6147
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
   Returns BFD_RELOC_AARCH64_NONE (with a fatal syntax error recorded)
   when the access size is too large for the relocation family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows follow the order of the pseudo reloc types handled below
     (plain LO12 then the four TLS variants); columns are indexed by
     log2 of the transfer size (8/16/32/64/128 bits).  The TLS rows
     have no 128-bit entry.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* Operand 1 may not have been given an explicit qualifier; deduce it
     from the qualifier of operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS variants stop at 64-bit transfers (see the table rows
     above); only plain LO12 has a 128-bit relocation.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6235
/* Check whether the register list encoded in REGINFO is valid.  The
   encoding packs the number of registers minus one into bits [1:0] and
   the register numbers, five bits each, into successive fields starting
   at bit 2.  Consecutive registers must increase by one (or by two when
   ACCEPT_ALTERNATE is non-zero), wrapping modulo 32.

   Return FALSE if such a register list is invalid, otherwise return
   TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected, n;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (n = 1; n < count; n++)
    {
      reginfo >>= 5;
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6266
6267 /* Generic instruction operand parser. This does no encoding and no
6268 semantic validation; it merely squirrels values away in the inst
6269 structure. Returns TRUE or FALSE depending on whether the
6270 specified grammar matched. */
6271
6272 static bool
6273 parse_operands (char *str, const aarch64_opcode *opcode)
6274 {
6275 int i;
6276 char *backtrack_pos = 0;
6277 const enum aarch64_opnd *operands = opcode->operands;
6278 aarch64_reg_type imm_reg_type;
6279
6280 clear_error ();
6281 skip_whitespace (str);
6282
6283 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6284 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6285 else
6286 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6287
6288 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6289 {
6290 int64_t val;
6291 const reg_entry *reg;
6292 int comma_skipped_p = 0;
6293 aarch64_reg_type rtype;
6294 struct vector_type_el vectype;
6295 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6296 aarch64_opnd_info *info = &inst.base.operands[i];
6297 aarch64_reg_type reg_type;
6298
6299 DEBUG_TRACE ("parse operand %d", i);
6300
6301 /* Assign the operand code. */
6302 info->type = operands[i];
6303
6304 if (optional_operand_p (opcode, i))
6305 {
6306 /* Remember where we are in case we need to backtrack. */
6307 gas_assert (!backtrack_pos);
6308 backtrack_pos = str;
6309 }
6310
6311 /* Expect comma between operands; the backtrack mechanism will take
6312 care of cases of omitted optional operand. */
6313 if (i > 0 && ! skip_past_char (&str, ','))
6314 {
6315 set_syntax_error (_("comma expected between operands"));
6316 goto failure;
6317 }
6318 else
6319 comma_skipped_p = 1;
6320
6321 switch (operands[i])
6322 {
6323 case AARCH64_OPND_Rd:
6324 case AARCH64_OPND_Rn:
6325 case AARCH64_OPND_Rm:
6326 case AARCH64_OPND_Rt:
6327 case AARCH64_OPND_Rt2:
6328 case AARCH64_OPND_Rs:
6329 case AARCH64_OPND_Ra:
6330 case AARCH64_OPND_Rt_LS64:
6331 case AARCH64_OPND_Rt_SYS:
6332 case AARCH64_OPND_PAIRREG:
6333 case AARCH64_OPND_SVE_Rm:
6334 po_int_reg_or_fail (REG_TYPE_R_Z);
6335
6336 /* In LS64 load/store instructions Rt register number must be even
6337 and <=22. */
6338 if (operands[i] == AARCH64_OPND_Rt_LS64)
6339 {
6340 /* We've already checked if this is valid register.
6341 This will check if register number (Rt) is not undefined for LS64
6342 instructions:
6343 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6344 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6345 {
6346 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6347 goto failure;
6348 }
6349 }
6350 break;
6351
6352 case AARCH64_OPND_Rd_SP:
6353 case AARCH64_OPND_Rn_SP:
6354 case AARCH64_OPND_Rt_SP:
6355 case AARCH64_OPND_SVE_Rn_SP:
6356 case AARCH64_OPND_Rm_SP:
6357 po_int_reg_or_fail (REG_TYPE_R_SP);
6358 break;
6359
6360 case AARCH64_OPND_Rm_EXT:
6361 case AARCH64_OPND_Rm_SFT:
6362 po_misc_or_fail (parse_shifter_operand
6363 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6364 ? SHIFTED_ARITH_IMM
6365 : SHIFTED_LOGIC_IMM)));
6366 if (!info->shifter.operator_present)
6367 {
6368 /* Default to LSL if not present. Libopcodes prefers shifter
6369 kind to be explicit. */
6370 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6371 info->shifter.kind = AARCH64_MOD_LSL;
6372 /* For Rm_EXT, libopcodes will carry out further check on whether
6373 or not stack pointer is used in the instruction (Recall that
6374 "the extend operator is not optional unless at least one of
6375 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6376 }
6377 break;
6378
6379 case AARCH64_OPND_Fd:
6380 case AARCH64_OPND_Fn:
6381 case AARCH64_OPND_Fm:
6382 case AARCH64_OPND_Fa:
6383 case AARCH64_OPND_Ft:
6384 case AARCH64_OPND_Ft2:
6385 case AARCH64_OPND_Sd:
6386 case AARCH64_OPND_Sn:
6387 case AARCH64_OPND_Sm:
6388 case AARCH64_OPND_SVE_VZn:
6389 case AARCH64_OPND_SVE_Vd:
6390 case AARCH64_OPND_SVE_Vm:
6391 case AARCH64_OPND_SVE_Vn:
6392 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6393 if (val == PARSE_FAIL)
6394 {
6395 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6396 goto failure;
6397 }
6398 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6399
6400 info->reg.regno = val;
6401 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6402 break;
6403
6404 case AARCH64_OPND_SVE_Pd:
6405 case AARCH64_OPND_SVE_Pg3:
6406 case AARCH64_OPND_SVE_Pg4_5:
6407 case AARCH64_OPND_SVE_Pg4_10:
6408 case AARCH64_OPND_SVE_Pg4_16:
6409 case AARCH64_OPND_SVE_Pm:
6410 case AARCH64_OPND_SVE_Pn:
6411 case AARCH64_OPND_SVE_Pt:
6412 case AARCH64_OPND_SME_Pm:
6413 reg_type = REG_TYPE_PN;
6414 goto vector_reg;
6415
6416 case AARCH64_OPND_SVE_Za_5:
6417 case AARCH64_OPND_SVE_Za_16:
6418 case AARCH64_OPND_SVE_Zd:
6419 case AARCH64_OPND_SVE_Zm_5:
6420 case AARCH64_OPND_SVE_Zm_16:
6421 case AARCH64_OPND_SVE_Zn:
6422 case AARCH64_OPND_SVE_Zt:
6423 reg_type = REG_TYPE_ZN;
6424 goto vector_reg;
6425
6426 case AARCH64_OPND_Va:
6427 case AARCH64_OPND_Vd:
6428 case AARCH64_OPND_Vn:
6429 case AARCH64_OPND_Vm:
6430 reg_type = REG_TYPE_VN;
6431 vector_reg:
6432 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6433 if (val == PARSE_FAIL)
6434 {
6435 first_error (_(get_reg_expected_msg (reg_type)));
6436 goto failure;
6437 }
6438 if (vectype.defined & NTA_HASINDEX)
6439 goto failure;
6440
6441 info->reg.regno = val;
6442 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6443 && vectype.type == NT_invtype)
6444 /* Unqualified Pn and Zn registers are allowed in certain
6445 contexts. Rely on F_STRICT qualifier checking to catch
6446 invalid uses. */
6447 info->qualifier = AARCH64_OPND_QLF_NIL;
6448 else
6449 {
6450 info->qualifier = vectype_to_qualifier (&vectype);
6451 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6452 goto failure;
6453 }
6454 break;
6455
6456 case AARCH64_OPND_VdD1:
6457 case AARCH64_OPND_VnD1:
6458 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6459 if (val == PARSE_FAIL)
6460 {
6461 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6462 goto failure;
6463 }
6464 if (vectype.type != NT_d || vectype.index != 1)
6465 {
6466 set_fatal_syntax_error
6467 (_("the top half of a 128-bit FP/SIMD register is expected"));
6468 goto failure;
6469 }
6470 info->reg.regno = val;
6471 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6472 here; it is correct for the purpose of encoding/decoding since
6473 only the register number is explicitly encoded in the related
6474 instructions, although this appears a bit hacky. */
6475 info->qualifier = AARCH64_OPND_QLF_S_D;
6476 break;
6477
6478 case AARCH64_OPND_SVE_Zm3_INDEX:
6479 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6480 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6481 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6482 case AARCH64_OPND_SVE_Zm4_INDEX:
6483 case AARCH64_OPND_SVE_Zn_INDEX:
6484 reg_type = REG_TYPE_ZN;
6485 goto vector_reg_index;
6486
6487 case AARCH64_OPND_Ed:
6488 case AARCH64_OPND_En:
6489 case AARCH64_OPND_Em:
6490 case AARCH64_OPND_Em16:
6491 case AARCH64_OPND_SM3_IMM2:
6492 reg_type = REG_TYPE_VN;
6493 vector_reg_index:
6494 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6495 if (val == PARSE_FAIL)
6496 {
6497 first_error (_(get_reg_expected_msg (reg_type)));
6498 goto failure;
6499 }
6500 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6501 goto failure;
6502
6503 info->reglane.regno = val;
6504 info->reglane.index = vectype.index;
6505 info->qualifier = vectype_to_qualifier (&vectype);
6506 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6507 goto failure;
6508 break;
6509
6510 case AARCH64_OPND_SVE_ZnxN:
6511 case AARCH64_OPND_SVE_ZtxN:
6512 reg_type = REG_TYPE_ZN;
6513 goto vector_reg_list;
6514
6515 case AARCH64_OPND_LVn:
6516 case AARCH64_OPND_LVt:
6517 case AARCH64_OPND_LVt_AL:
6518 case AARCH64_OPND_LEt:
6519 reg_type = REG_TYPE_VN;
6520 vector_reg_list:
6521 if (reg_type == REG_TYPE_ZN
6522 && get_opcode_dependent_value (opcode) == 1
6523 && *str != '{')
6524 {
6525 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6526 if (val == PARSE_FAIL)
6527 {
6528 first_error (_(get_reg_expected_msg (reg_type)));
6529 goto failure;
6530 }
6531 info->reglist.first_regno = val;
6532 info->reglist.num_regs = 1;
6533 }
6534 else
6535 {
6536 val = parse_vector_reg_list (&str, reg_type, &vectype);
6537 if (val == PARSE_FAIL)
6538 goto failure;
6539
6540 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6541 {
6542 set_fatal_syntax_error (_("invalid register list"));
6543 goto failure;
6544 }
6545
6546 if (vectype.width != 0 && *str != ',')
6547 {
6548 set_fatal_syntax_error
6549 (_("expected element type rather than vector type"));
6550 goto failure;
6551 }
6552
6553 info->reglist.first_regno = (val >> 2) & 0x1f;
6554 info->reglist.num_regs = (val & 0x3) + 1;
6555 }
6556 if (operands[i] == AARCH64_OPND_LEt)
6557 {
6558 if (!(vectype.defined & NTA_HASINDEX))
6559 goto failure;
6560 info->reglist.has_index = 1;
6561 info->reglist.index = vectype.index;
6562 }
6563 else
6564 {
6565 if (vectype.defined & NTA_HASINDEX)
6566 goto failure;
6567 if (!(vectype.defined & NTA_HASTYPE))
6568 {
6569 if (reg_type == REG_TYPE_ZN)
6570 set_fatal_syntax_error (_("missing type suffix"));
6571 goto failure;
6572 }
6573 }
6574 info->qualifier = vectype_to_qualifier (&vectype);
6575 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6576 goto failure;
6577 break;
6578
6579 case AARCH64_OPND_CRn:
6580 case AARCH64_OPND_CRm:
6581 {
6582 char prefix = *(str++);
6583 if (prefix != 'c' && prefix != 'C')
6584 goto failure;
6585
6586 po_imm_nc_or_fail ();
6587 if (val > 15)
6588 {
6589 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6590 goto failure;
6591 }
6592 info->qualifier = AARCH64_OPND_QLF_CR;
6593 info->imm.value = val;
6594 break;
6595 }
6596
6597 case AARCH64_OPND_SHLL_IMM:
6598 case AARCH64_OPND_IMM_VLSR:
6599 po_imm_or_fail (1, 64);
6600 info->imm.value = val;
6601 break;
6602
6603 case AARCH64_OPND_CCMP_IMM:
6604 case AARCH64_OPND_SIMM5:
6605 case AARCH64_OPND_FBITS:
6606 case AARCH64_OPND_TME_UIMM16:
6607 case AARCH64_OPND_UIMM4:
6608 case AARCH64_OPND_UIMM4_ADDG:
6609 case AARCH64_OPND_UIMM10:
6610 case AARCH64_OPND_UIMM3_OP1:
6611 case AARCH64_OPND_UIMM3_OP2:
6612 case AARCH64_OPND_IMM_VLSL:
6613 case AARCH64_OPND_IMM:
6614 case AARCH64_OPND_IMM_2:
6615 case AARCH64_OPND_WIDTH:
6616 case AARCH64_OPND_SVE_INV_LIMM:
6617 case AARCH64_OPND_SVE_LIMM:
6618 case AARCH64_OPND_SVE_LIMM_MOV:
6619 case AARCH64_OPND_SVE_SHLIMM_PRED:
6620 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6621 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6622 case AARCH64_OPND_SVE_SHRIMM_PRED:
6623 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6624 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6625 case AARCH64_OPND_SVE_SIMM5:
6626 case AARCH64_OPND_SVE_SIMM5B:
6627 case AARCH64_OPND_SVE_SIMM6:
6628 case AARCH64_OPND_SVE_SIMM8:
6629 case AARCH64_OPND_SVE_UIMM3:
6630 case AARCH64_OPND_SVE_UIMM7:
6631 case AARCH64_OPND_SVE_UIMM8:
6632 case AARCH64_OPND_SVE_UIMM8_53:
6633 case AARCH64_OPND_IMM_ROT1:
6634 case AARCH64_OPND_IMM_ROT2:
6635 case AARCH64_OPND_IMM_ROT3:
6636 case AARCH64_OPND_SVE_IMM_ROT1:
6637 case AARCH64_OPND_SVE_IMM_ROT2:
6638 case AARCH64_OPND_SVE_IMM_ROT3:
6639 po_imm_nc_or_fail ();
6640 info->imm.value = val;
6641 break;
6642
6643 case AARCH64_OPND_SVE_AIMM:
6644 case AARCH64_OPND_SVE_ASIMM:
6645 po_imm_nc_or_fail ();
6646 info->imm.value = val;
6647 skip_whitespace (str);
6648 if (skip_past_comma (&str))
6649 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6650 else
6651 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6652 break;
6653
6654 case AARCH64_OPND_SVE_PATTERN:
6655 po_enum_or_fail (aarch64_sve_pattern_array);
6656 info->imm.value = val;
6657 break;
6658
6659 case AARCH64_OPND_SVE_PATTERN_SCALED:
6660 po_enum_or_fail (aarch64_sve_pattern_array);
6661 info->imm.value = val;
6662 if (skip_past_comma (&str)
6663 && !parse_shift (&str, info, SHIFTED_MUL))
6664 goto failure;
6665 if (!info->shifter.operator_present)
6666 {
6667 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6668 info->shifter.kind = AARCH64_MOD_MUL;
6669 info->shifter.amount = 1;
6670 }
6671 break;
6672
6673 case AARCH64_OPND_SVE_PRFOP:
6674 po_enum_or_fail (aarch64_sve_prfop_array);
6675 info->imm.value = val;
6676 break;
6677
6678 case AARCH64_OPND_UIMM7:
6679 po_imm_or_fail (0, 127);
6680 info->imm.value = val;
6681 break;
6682
6683 case AARCH64_OPND_IDX:
6684 case AARCH64_OPND_MASK:
6685 case AARCH64_OPND_BIT_NUM:
6686 case AARCH64_OPND_IMMR:
6687 case AARCH64_OPND_IMMS:
6688 po_imm_or_fail (0, 63);
6689 info->imm.value = val;
6690 break;
6691
6692 case AARCH64_OPND_IMM0:
6693 po_imm_nc_or_fail ();
6694 if (val != 0)
6695 {
6696 set_fatal_syntax_error (_("immediate zero expected"));
6697 goto failure;
6698 }
6699 info->imm.value = 0;
6700 break;
6701
6702 case AARCH64_OPND_FPIMM0:
6703 {
6704 int qfloat;
6705 bool res1 = false, res2 = false;
6706 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6707 it is probably not worth the effort to support it. */
6708 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6709 imm_reg_type))
6710 && (error_p ()
6711 || !(res2 = parse_constant_immediate (&str, &val,
6712 imm_reg_type))))
6713 goto failure;
6714 if ((res1 && qfloat == 0) || (res2 && val == 0))
6715 {
6716 info->imm.value = 0;
6717 info->imm.is_fp = 1;
6718 break;
6719 }
6720 set_fatal_syntax_error (_("immediate zero expected"));
6721 goto failure;
6722 }
6723
6724 case AARCH64_OPND_IMM_MOV:
6725 {
6726 char *saved = str;
6727 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6728 reg_name_p (str, REG_TYPE_VN))
6729 goto failure;
6730 str = saved;
6731 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6732 GE_OPT_PREFIX, REJECT_ABSENT));
6733 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6734 later. fix_mov_imm_insn will try to determine a machine
6735 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6736 message if the immediate cannot be moved by a single
6737 instruction. */
6738 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6739 inst.base.operands[i].skip = 1;
6740 }
6741 break;
6742
6743 case AARCH64_OPND_SIMD_IMM:
6744 case AARCH64_OPND_SIMD_IMM_SFT:
6745 if (! parse_big_immediate (&str, &val, imm_reg_type))
6746 goto failure;
6747 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6748 /* addr_off_p */ 0,
6749 /* need_libopcodes_p */ 1,
6750 /* skip_p */ 1);
6751 /* Parse shift.
6752 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6753 shift, we don't check it here; we leave the checking to
6754 the libopcodes (operand_general_constraint_met_p). By
6755 doing this, we achieve better diagnostics. */
6756 if (skip_past_comma (&str)
6757 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6758 goto failure;
6759 if (!info->shifter.operator_present
6760 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6761 {
6762 /* Default to LSL if not present. Libopcodes prefers shifter
6763 kind to be explicit. */
6764 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6765 info->shifter.kind = AARCH64_MOD_LSL;
6766 }
6767 break;
6768
6769 case AARCH64_OPND_FPIMM:
6770 case AARCH64_OPND_SIMD_FPIMM:
6771 case AARCH64_OPND_SVE_FPIMM8:
6772 {
6773 int qfloat;
6774 bool dp_p;
6775
6776 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6777 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6778 || !aarch64_imm_float_p (qfloat))
6779 {
6780 if (!error_p ())
6781 set_fatal_syntax_error (_("invalid floating-point"
6782 " constant"));
6783 goto failure;
6784 }
6785 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6786 inst.base.operands[i].imm.is_fp = 1;
6787 }
6788 break;
6789
6790 case AARCH64_OPND_SVE_I1_HALF_ONE:
6791 case AARCH64_OPND_SVE_I1_HALF_TWO:
6792 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6793 {
6794 int qfloat;
6795 bool dp_p;
6796
6797 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6798 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6799 {
6800 if (!error_p ())
6801 set_fatal_syntax_error (_("invalid floating-point"
6802 " constant"));
6803 goto failure;
6804 }
6805 inst.base.operands[i].imm.value = qfloat;
6806 inst.base.operands[i].imm.is_fp = 1;
6807 }
6808 break;
6809
6810 case AARCH64_OPND_LIMM:
6811 po_misc_or_fail (parse_shifter_operand (&str, info,
6812 SHIFTED_LOGIC_IMM));
6813 if (info->shifter.operator_present)
6814 {
6815 set_fatal_syntax_error
6816 (_("shift not allowed for bitmask immediate"));
6817 goto failure;
6818 }
6819 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6820 /* addr_off_p */ 0,
6821 /* need_libopcodes_p */ 1,
6822 /* skip_p */ 1);
6823 break;
6824
6825 case AARCH64_OPND_AIMM:
6826 if (opcode->op == OP_ADD)
6827 /* ADD may have relocation types. */
6828 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6829 SHIFTED_ARITH_IMM));
6830 else
6831 po_misc_or_fail (parse_shifter_operand (&str, info,
6832 SHIFTED_ARITH_IMM));
6833 switch (inst.reloc.type)
6834 {
6835 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6836 info->shifter.amount = 12;
6837 break;
6838 case BFD_RELOC_UNUSED:
6839 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6840 if (info->shifter.kind != AARCH64_MOD_NONE)
6841 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6842 inst.reloc.pc_rel = 0;
6843 break;
6844 default:
6845 break;
6846 }
6847 info->imm.value = 0;
6848 if (!info->shifter.operator_present)
6849 {
6850 /* Default to LSL if not present. Libopcodes prefers shifter
6851 kind to be explicit. */
6852 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6853 info->shifter.kind = AARCH64_MOD_LSL;
6854 }
6855 break;
6856
6857 case AARCH64_OPND_HALF:
6858 {
6859 /* #<imm16> or relocation. */
6860 int internal_fixup_p;
6861 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6862 if (internal_fixup_p)
6863 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6864 skip_whitespace (str);
6865 if (skip_past_comma (&str))
6866 {
6867 /* {, LSL #<shift>} */
6868 if (! aarch64_gas_internal_fixup_p ())
6869 {
6870 set_fatal_syntax_error (_("can't mix relocation modifier "
6871 "with explicit shift"));
6872 goto failure;
6873 }
6874 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6875 }
6876 else
6877 inst.base.operands[i].shifter.amount = 0;
6878 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6879 inst.base.operands[i].imm.value = 0;
6880 if (! process_movw_reloc_info ())
6881 goto failure;
6882 }
6883 break;
6884
6885 case AARCH64_OPND_EXCEPTION:
6886 case AARCH64_OPND_UNDEFINED:
6887 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6888 imm_reg_type));
6889 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6890 /* addr_off_p */ 0,
6891 /* need_libopcodes_p */ 0,
6892 /* skip_p */ 1);
6893 break;
6894
6895 case AARCH64_OPND_NZCV:
6896 {
6897 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6898 if (nzcv != NULL)
6899 {
6900 str += 4;
6901 info->imm.value = nzcv->value;
6902 break;
6903 }
6904 po_imm_or_fail (0, 15);
6905 info->imm.value = val;
6906 }
6907 break;
6908
6909 case AARCH64_OPND_COND:
6910 case AARCH64_OPND_COND1:
6911 {
6912 char *start = str;
6913 do
6914 str++;
6915 while (ISALPHA (*str));
6916 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6917 if (info->cond == NULL)
6918 {
6919 set_syntax_error (_("invalid condition"));
6920 goto failure;
6921 }
6922 else if (operands[i] == AARCH64_OPND_COND1
6923 && (info->cond->value & 0xe) == 0xe)
6924 {
6925 /* Do not allow AL or NV. */
6926 set_default_error ();
6927 goto failure;
6928 }
6929 }
6930 break;
6931
6932 case AARCH64_OPND_ADDR_ADRP:
6933 po_misc_or_fail (parse_adrp (&str));
6934 /* Clear the value as operand needs to be relocated. */
6935 info->imm.value = 0;
6936 break;
6937
6938 case AARCH64_OPND_ADDR_PCREL14:
6939 case AARCH64_OPND_ADDR_PCREL19:
6940 case AARCH64_OPND_ADDR_PCREL21:
6941 case AARCH64_OPND_ADDR_PCREL26:
6942 po_misc_or_fail (parse_address (&str, info));
6943 if (!info->addr.pcrel)
6944 {
6945 set_syntax_error (_("invalid pc-relative address"));
6946 goto failure;
6947 }
6948 if (inst.gen_lit_pool
6949 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6950 {
6951 /* Only permit "=value" in the literal load instructions.
6952 The literal will be generated by programmer_friendly_fixup. */
6953 set_syntax_error (_("invalid use of \"=immediate\""));
6954 goto failure;
6955 }
6956 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6957 {
6958 set_syntax_error (_("unrecognized relocation suffix"));
6959 goto failure;
6960 }
6961 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6962 {
6963 info->imm.value = inst.reloc.exp.X_add_number;
6964 inst.reloc.type = BFD_RELOC_UNUSED;
6965 }
6966 else
6967 {
6968 info->imm.value = 0;
6969 if (inst.reloc.type == BFD_RELOC_UNUSED)
6970 switch (opcode->iclass)
6971 {
6972 case compbranch:
6973 case condbranch:
6974 /* e.g. CBZ or B.COND */
6975 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6976 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6977 break;
6978 case testbranch:
6979 /* e.g. TBZ */
6980 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6981 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6982 break;
6983 case branch_imm:
6984 /* e.g. B or BL */
6985 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6986 inst.reloc.type =
6987 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6988 : BFD_RELOC_AARCH64_JUMP26;
6989 break;
6990 case loadlit:
6991 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6992 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6993 break;
6994 case pcreladdr:
6995 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6996 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6997 break;
6998 default:
6999 gas_assert (0);
7000 abort ();
7001 }
7002 inst.reloc.pc_rel = 1;
7003 }
7004 break;
7005
7006 case AARCH64_OPND_ADDR_SIMPLE:
7007 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7008 {
7009 /* [<Xn|SP>{, #<simm>}] */
7010 char *start = str;
7011 /* First use the normal address-parsing routines, to get
7012 the usual syntax errors. */
7013 po_misc_or_fail (parse_address (&str, info));
7014 if (info->addr.pcrel || info->addr.offset.is_reg
7015 || !info->addr.preind || info->addr.postind
7016 || info->addr.writeback)
7017 {
7018 set_syntax_error (_("invalid addressing mode"));
7019 goto failure;
7020 }
7021
7022 /* Then retry, matching the specific syntax of these addresses. */
7023 str = start;
7024 po_char_or_fail ('[');
7025 po_reg_or_fail (REG_TYPE_R64_SP);
7026 /* Accept optional ", #0". */
7027 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7028 && skip_past_char (&str, ','))
7029 {
7030 skip_past_char (&str, '#');
7031 if (! skip_past_char (&str, '0'))
7032 {
7033 set_fatal_syntax_error
7034 (_("the optional immediate offset can only be 0"));
7035 goto failure;
7036 }
7037 }
7038 po_char_or_fail (']');
7039 break;
7040 }
7041
7042 case AARCH64_OPND_ADDR_REGOFF:
7043 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7044 po_misc_or_fail (parse_address (&str, info));
7045 regoff_addr:
7046 if (info->addr.pcrel || !info->addr.offset.is_reg
7047 || !info->addr.preind || info->addr.postind
7048 || info->addr.writeback)
7049 {
7050 set_syntax_error (_("invalid addressing mode"));
7051 goto failure;
7052 }
7053 if (!info->shifter.operator_present)
7054 {
7055 /* Default to LSL if not present. Libopcodes prefers shifter
7056 kind to be explicit. */
7057 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7058 info->shifter.kind = AARCH64_MOD_LSL;
7059 }
7060 /* Qualifier to be deduced by libopcodes. */
7061 break;
7062
7063 case AARCH64_OPND_ADDR_SIMM7:
7064 po_misc_or_fail (parse_address (&str, info));
7065 if (info->addr.pcrel || info->addr.offset.is_reg
7066 || (!info->addr.preind && !info->addr.postind))
7067 {
7068 set_syntax_error (_("invalid addressing mode"));
7069 goto failure;
7070 }
7071 if (inst.reloc.type != BFD_RELOC_UNUSED)
7072 {
7073 set_syntax_error (_("relocation not allowed"));
7074 goto failure;
7075 }
7076 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7077 /* addr_off_p */ 1,
7078 /* need_libopcodes_p */ 1,
7079 /* skip_p */ 0);
7080 break;
7081
7082 case AARCH64_OPND_ADDR_SIMM9:
7083 case AARCH64_OPND_ADDR_SIMM9_2:
7084 case AARCH64_OPND_ADDR_SIMM11:
7085 case AARCH64_OPND_ADDR_SIMM13:
7086 po_misc_or_fail (parse_address (&str, info));
7087 if (info->addr.pcrel || info->addr.offset.is_reg
7088 || (!info->addr.preind && !info->addr.postind)
7089 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7090 && info->addr.writeback))
7091 {
7092 set_syntax_error (_("invalid addressing mode"));
7093 goto failure;
7094 }
7095 if (inst.reloc.type != BFD_RELOC_UNUSED)
7096 {
7097 set_syntax_error (_("relocation not allowed"));
7098 goto failure;
7099 }
7100 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7101 /* addr_off_p */ 1,
7102 /* need_libopcodes_p */ 1,
7103 /* skip_p */ 0);
7104 break;
7105
7106 case AARCH64_OPND_ADDR_SIMM10:
7107 case AARCH64_OPND_ADDR_OFFSET:
7108 po_misc_or_fail (parse_address (&str, info));
7109 if (info->addr.pcrel || info->addr.offset.is_reg
7110 || !info->addr.preind || info->addr.postind)
7111 {
7112 set_syntax_error (_("invalid addressing mode"));
7113 goto failure;
7114 }
7115 if (inst.reloc.type != BFD_RELOC_UNUSED)
7116 {
7117 set_syntax_error (_("relocation not allowed"));
7118 goto failure;
7119 }
7120 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7121 /* addr_off_p */ 1,
7122 /* need_libopcodes_p */ 1,
7123 /* skip_p */ 0);
7124 break;
7125
7126 case AARCH64_OPND_ADDR_UIMM12:
7127 po_misc_or_fail (parse_address (&str, info));
7128 if (info->addr.pcrel || info->addr.offset.is_reg
7129 || !info->addr.preind || info->addr.writeback)
7130 {
7131 set_syntax_error (_("invalid addressing mode"));
7132 goto failure;
7133 }
7134 if (inst.reloc.type == BFD_RELOC_UNUSED)
7135 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7136 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7137 || (inst.reloc.type
7138 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7139 || (inst.reloc.type
7140 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7141 || (inst.reloc.type
7142 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7143 || (inst.reloc.type
7144 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7145 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7146 /* Leave qualifier to be determined by libopcodes. */
7147 break;
7148
7149 case AARCH64_OPND_SIMD_ADDR_POST:
7150 /* [<Xn|SP>], <Xm|#<amount>> */
7151 po_misc_or_fail (parse_address (&str, info));
7152 if (!info->addr.postind || !info->addr.writeback)
7153 {
7154 set_syntax_error (_("invalid addressing mode"));
7155 goto failure;
7156 }
7157 if (!info->addr.offset.is_reg)
7158 {
7159 if (inst.reloc.exp.X_op == O_constant)
7160 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7161 else
7162 {
7163 set_fatal_syntax_error
7164 (_("writeback value must be an immediate constant"));
7165 goto failure;
7166 }
7167 }
7168 /* No qualifier. */
7169 break;
7170
7171 case AARCH64_OPND_SME_SM_ZA:
7172 /* { SM | ZA } */
7173 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7174 {
7175 set_syntax_error (_("unknown or missing PSTATE field name"));
7176 goto failure;
7177 }
7178 info->reg.regno = val;
7179 break;
7180
7181 case AARCH64_OPND_SME_PnT_Wm_imm:
7182 /* <Pn>.<T>[<Wm>, #<imm>] */
7183 {
7184 int index_base_reg;
7185 int imm;
7186 val = parse_sme_pred_reg_with_index (&str,
7187 &index_base_reg,
7188 &imm,
7189 &qualifier);
7190 if (val == PARSE_FAIL)
7191 goto failure;
7192
7193 info->za_tile_vector.regno = val;
7194 info->za_tile_vector.index.regno = index_base_reg;
7195 info->za_tile_vector.index.imm = imm;
7196 info->qualifier = qualifier;
7197 break;
7198 }
7199
7200 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7201 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7202 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7203 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7204 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7205 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7206 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7207 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7208 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7209 case AARCH64_OPND_SVE_ADDR_RI_U6:
7210 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7211 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7212 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7213 /* [X<n>{, #imm, MUL VL}]
7214 [X<n>{, #imm}]
7215 but recognizing SVE registers. */
7216 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7217 &offset_qualifier));
7218 if (base_qualifier != AARCH64_OPND_QLF_X)
7219 {
7220 set_syntax_error (_("invalid addressing mode"));
7221 goto failure;
7222 }
7223 sve_regimm:
7224 if (info->addr.pcrel || info->addr.offset.is_reg
7225 || !info->addr.preind || info->addr.writeback)
7226 {
7227 set_syntax_error (_("invalid addressing mode"));
7228 goto failure;
7229 }
7230 if (inst.reloc.type != BFD_RELOC_UNUSED
7231 || inst.reloc.exp.X_op != O_constant)
7232 {
7233 /* Make sure this has priority over
7234 "invalid addressing mode". */
7235 set_fatal_syntax_error (_("constant offset required"));
7236 goto failure;
7237 }
7238 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7239 break;
7240
7241 case AARCH64_OPND_SVE_ADDR_R:
7242 /* [<Xn|SP>{, <R><m>}]
7243 but recognizing SVE registers. */
7244 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7245 &offset_qualifier));
7246 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7247 {
7248 offset_qualifier = AARCH64_OPND_QLF_X;
7249 info->addr.offset.is_reg = 1;
7250 info->addr.offset.regno = 31;
7251 }
7252 else if (base_qualifier != AARCH64_OPND_QLF_X
7253 || offset_qualifier != AARCH64_OPND_QLF_X)
7254 {
7255 set_syntax_error (_("invalid addressing mode"));
7256 goto failure;
7257 }
7258 goto regoff_addr;
7259
7260 case AARCH64_OPND_SVE_ADDR_RR:
7261 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7262 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7263 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7264 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7265 case AARCH64_OPND_SVE_ADDR_RX:
7266 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7267 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7268 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7269 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7270 but recognizing SVE registers. */
7271 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7272 &offset_qualifier));
7273 if (base_qualifier != AARCH64_OPND_QLF_X
7274 || offset_qualifier != AARCH64_OPND_QLF_X)
7275 {
7276 set_syntax_error (_("invalid addressing mode"));
7277 goto failure;
7278 }
7279 goto regoff_addr;
7280
7281 case AARCH64_OPND_SVE_ADDR_RZ:
7282 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7283 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7284 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7285 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7286 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7287 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7288 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7289 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7290 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7291 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7292 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7293 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7294 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7295 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7296 &offset_qualifier));
7297 if (base_qualifier != AARCH64_OPND_QLF_X
7298 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7299 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7300 {
7301 set_syntax_error (_("invalid addressing mode"));
7302 goto failure;
7303 }
7304 info->qualifier = offset_qualifier;
7305 goto regoff_addr;
7306
7307 case AARCH64_OPND_SVE_ADDR_ZX:
7308 /* [Zn.<T>{, <Xm>}]. */
7309 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7310 &offset_qualifier));
7311 /* Things to check:
7312 base_qualifier either S_S or S_D
7313 offset_qualifier must be X
7314 */
7315 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7316 && base_qualifier != AARCH64_OPND_QLF_S_D)
7317 || offset_qualifier != AARCH64_OPND_QLF_X)
7318 {
7319 set_syntax_error (_("invalid addressing mode"));
7320 goto failure;
7321 }
7322 info->qualifier = base_qualifier;
7323 if (!info->addr.offset.is_reg || info->addr.pcrel
7324 || !info->addr.preind || info->addr.writeback
7325 || info->shifter.operator_present != 0)
7326 {
7327 set_syntax_error (_("invalid addressing mode"));
7328 goto failure;
7329 }
7330 info->shifter.kind = AARCH64_MOD_LSL;
7331 break;
7332
7333
7334 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7335 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7336 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7337 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7338 /* [Z<n>.<T>{, #imm}] */
7339 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7340 &offset_qualifier));
7341 if (base_qualifier != AARCH64_OPND_QLF_S_S
7342 && base_qualifier != AARCH64_OPND_QLF_S_D)
7343 {
7344 set_syntax_error (_("invalid addressing mode"));
7345 goto failure;
7346 }
7347 info->qualifier = base_qualifier;
7348 goto sve_regimm;
7349
7350 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7351 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7352 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7353 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7354 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7355
7356 We don't reject:
7357
7358 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7359
7360 here since we get better error messages by leaving it to
7361 the qualifier checking routines. */
7362 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7363 &offset_qualifier));
7364 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7365 && base_qualifier != AARCH64_OPND_QLF_S_D)
7366 || offset_qualifier != base_qualifier)
7367 {
7368 set_syntax_error (_("invalid addressing mode"));
7369 goto failure;
7370 }
7371 info->qualifier = base_qualifier;
7372 goto regoff_addr;
7373
7374 case AARCH64_OPND_SYSREG:
7375 {
7376 uint32_t sysreg_flags;
7377 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7378 &sysreg_flags)) == PARSE_FAIL)
7379 {
7380 set_syntax_error (_("unknown or missing system register name"));
7381 goto failure;
7382 }
7383 inst.base.operands[i].sysreg.value = val;
7384 inst.base.operands[i].sysreg.flags = sysreg_flags;
7385 break;
7386 }
7387
7388 case AARCH64_OPND_PSTATEFIELD:
7389 {
7390 uint32_t sysreg_flags;
7391 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7392 &sysreg_flags)) == PARSE_FAIL)
7393 {
7394 set_syntax_error (_("unknown or missing PSTATE field name"));
7395 goto failure;
7396 }
7397 inst.base.operands[i].pstatefield = val;
7398 inst.base.operands[i].sysreg.flags = sysreg_flags;
7399 break;
7400 }
7401
7402 case AARCH64_OPND_SYSREG_IC:
7403 inst.base.operands[i].sysins_op =
7404 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7405 goto sys_reg_ins;
7406
7407 case AARCH64_OPND_SYSREG_DC:
7408 inst.base.operands[i].sysins_op =
7409 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7410 goto sys_reg_ins;
7411
7412 case AARCH64_OPND_SYSREG_AT:
7413 inst.base.operands[i].sysins_op =
7414 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7415 goto sys_reg_ins;
7416
7417 case AARCH64_OPND_SYSREG_SR:
7418 inst.base.operands[i].sysins_op =
7419 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7420 goto sys_reg_ins;
7421
7422 case AARCH64_OPND_SYSREG_TLBI:
7423 inst.base.operands[i].sysins_op =
7424 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7425 sys_reg_ins:
7426 if (inst.base.operands[i].sysins_op == NULL)
7427 {
7428 set_fatal_syntax_error ( _("unknown or missing operation name"));
7429 goto failure;
7430 }
7431 break;
7432
7433 case AARCH64_OPND_BARRIER:
7434 case AARCH64_OPND_BARRIER_ISB:
7435 val = parse_barrier (&str);
7436 if (val != PARSE_FAIL
7437 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7438 {
7439 /* ISB only accepts options name 'sy'. */
7440 set_syntax_error
7441 (_("the specified option is not accepted in ISB"));
7442 /* Turn off backtrack as this optional operand is present. */
7443 backtrack_pos = 0;
7444 goto failure;
7445 }
7446 if (val != PARSE_FAIL
7447 && operands[i] == AARCH64_OPND_BARRIER)
7448 {
7449 /* Regular barriers accept options CRm (C0-C15).
7450 DSB nXS barrier variant accepts values > 15. */
7451 if (val < 0 || val > 15)
7452 {
7453 set_syntax_error (_("the specified option is not accepted in DSB"));
7454 goto failure;
7455 }
7456 }
7457 /* This is an extension to accept a 0..15 immediate. */
7458 if (val == PARSE_FAIL)
7459 po_imm_or_fail (0, 15);
7460 info->barrier = aarch64_barrier_options + val;
7461 break;
7462
7463 case AARCH64_OPND_BARRIER_DSB_NXS:
7464 val = parse_barrier (&str);
7465 if (val != PARSE_FAIL)
7466 {
7467 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7468 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7469 {
7470 set_syntax_error (_("the specified option is not accepted in DSB"));
7471 /* Turn off backtrack as this optional operand is present. */
7472 backtrack_pos = 0;
7473 goto failure;
7474 }
7475 }
7476 else
7477 {
7478 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7479 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7480 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7481 goto failure;
7482 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7483 {
7484 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7485 goto failure;
7486 }
7487 }
7488 /* Option index is encoded as 2-bit value in val<3:2>. */
7489 val = (val >> 2) - 4;
7490 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7491 break;
7492
7493 case AARCH64_OPND_PRFOP:
7494 val = parse_pldop (&str);
7495 /* This is an extension to accept a 0..31 immediate. */
7496 if (val == PARSE_FAIL)
7497 po_imm_or_fail (0, 31);
7498 inst.base.operands[i].prfop = aarch64_prfops + val;
7499 break;
7500
7501 case AARCH64_OPND_BARRIER_PSB:
7502 val = parse_barrier_psb (&str, &(info->hint_option));
7503 if (val == PARSE_FAIL)
7504 goto failure;
7505 break;
7506
7507 case AARCH64_OPND_BTI_TARGET:
7508 val = parse_bti_operand (&str, &(info->hint_option));
7509 if (val == PARSE_FAIL)
7510 goto failure;
7511 break;
7512
7513 case AARCH64_OPND_SME_ZAda_2b:
7514 case AARCH64_OPND_SME_ZAda_3b:
7515 val = parse_sme_zada_operand (&str, &qualifier);
7516 if (val == PARSE_FAIL)
7517 goto failure;
7518 info->reg.regno = val;
7519 info->qualifier = qualifier;
7520 break;
7521
7522 case AARCH64_OPND_SME_ZA_HV_idx_src:
7523 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7524 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7525 {
7526 enum sme_hv_slice slice_indicator;
7527 int vector_select_register;
7528 int imm;
7529
7530 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7531 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7532 &slice_indicator,
7533 &vector_select_register,
7534 &imm,
7535 &qualifier);
7536 else
7537 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7538 &vector_select_register,
7539 &imm,
7540 &qualifier);
7541 if (val == PARSE_FAIL)
7542 goto failure;
7543 info->za_tile_vector.regno = val;
7544 info->za_tile_vector.index.regno = vector_select_register;
7545 info->za_tile_vector.index.imm = imm;
7546 info->za_tile_vector.v = slice_indicator;
7547 info->qualifier = qualifier;
7548 break;
7549 }
7550
7551 case AARCH64_OPND_SME_list_of_64bit_tiles:
7552 val = parse_sme_list_of_64bit_tiles (&str);
7553 if (val == PARSE_FAIL)
7554 goto failure;
7555 info->imm.value = val;
7556 break;
7557
7558 case AARCH64_OPND_SME_ZA_array:
7559 {
7560 int imm;
7561 val = parse_sme_za_array (&str, &imm);
7562 if (val == PARSE_FAIL)
7563 goto failure;
7564 info->za_tile_vector.index.regno = val;
7565 info->za_tile_vector.index.imm = imm;
7566 break;
7567 }
7568
7569 case AARCH64_OPND_MOPS_ADDR_Rd:
7570 case AARCH64_OPND_MOPS_ADDR_Rs:
7571 po_char_or_fail ('[');
7572 if (!parse_x0_to_x30 (&str, info))
7573 goto failure;
7574 po_char_or_fail (']');
7575 po_char_or_fail ('!');
7576 break;
7577
7578 case AARCH64_OPND_MOPS_WB_Rn:
7579 if (!parse_x0_to_x30 (&str, info))
7580 goto failure;
7581 po_char_or_fail ('!');
7582 break;
7583
7584 default:
7585 as_fatal (_("unhandled operand code %d"), operands[i]);
7586 }
7587
7588 /* If we get here, this operand was successfully parsed. */
7589 inst.base.operands[i].present = 1;
7590 continue;
7591
7592 failure:
7593 /* The parse routine should already have set the error, but in case
7594 not, set a default one here. */
7595 if (! error_p ())
7596 set_default_error ();
7597
7598 if (! backtrack_pos)
7599 goto parse_operands_return;
7600
7601 {
7602 /* We reach here because this operand is marked as optional, and
7603 either no operand was supplied or the operand was supplied but it
7604 was syntactically incorrect. In the latter case we report an
7605 error. In the former case we perform a few more checks before
7606 dropping through to the code to insert the default operand. */
7607
7608 char *tmp = backtrack_pos;
7609 char endchar = END_OF_INSN;
7610
7611 if (i != (aarch64_num_of_operands (opcode) - 1))
7612 endchar = ',';
7613 skip_past_char (&tmp, ',');
7614
7615 if (*tmp != endchar)
7616 /* The user has supplied an operand in the wrong format. */
7617 goto parse_operands_return;
7618
7619 /* Make sure there is not a comma before the optional operand.
7620 For example the fifth operand of 'sys' is optional:
7621
7622 sys #0,c0,c0,#0, <--- wrong
7623 sys #0,c0,c0,#0 <--- correct. */
7624 if (comma_skipped_p && i && endchar == END_OF_INSN)
7625 {
7626 set_fatal_syntax_error
7627 (_("unexpected comma before the omitted optional operand"));
7628 goto parse_operands_return;
7629 }
7630 }
7631
7632 /* Reaching here means we are dealing with an optional operand that is
7633 omitted from the assembly line. */
7634 gas_assert (optional_operand_p (opcode, i));
7635 info->present = 0;
7636 process_omitted_operand (operands[i], opcode, i, info);
7637
7638 /* Try again, skipping the optional operand at backtrack_pos. */
7639 str = backtrack_pos;
7640 backtrack_pos = 0;
7641
7642 /* Clear any error record after the omitted optional operand has been
7643 successfully handled. */
7644 clear_error ();
7645 }
7646
7647 /* Check if we have parsed all the operands. */
7648 if (*str != '\0' && ! error_p ())
7649 {
7650 /* Set I to the index of the last present operand; this is
7651 for the purpose of diagnostics. */
7652 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7653 ;
7654 set_fatal_syntax_error
7655 (_("unexpected characters following instruction"));
7656 }
7657
7658 parse_operands_return:
7659
7660 if (error_p ())
7661 {
7662 DEBUG_TRACE ("parsing FAIL: %s - %s",
7663 operand_mismatch_kind_names[get_error_kind ()],
7664 get_error_message ());
7665 /* Record the operand error properly; this is useful when there
7666 are multiple instruction templates for a mnemonic name, so that
7667 later on, we can select the error that most closely describes
7668 the problem. */
7669 record_operand_error (opcode, i, get_error_kind (),
7670 get_error_message ());
7671 return false;
7672 }
7673 else
7674 {
7675 DEBUG_TRACE ("parsing SUCCESS");
7676 return true;
7677 }
7678 }
7679
7680 /* Perform some fix-ups to provide programmer-friendly features while
7681    keeping libopcodes happy, i.e. libopcodes only accepts the
7682    preferred architectural syntax.
7683    Return FALSE if there is any failure; otherwise return TRUE. */
7684
7685 static bool
7686 programmer_friendly_fixup (aarch64_instruction *instr)
7687 {
7688 aarch64_inst *base = &instr->base;
7689 const aarch64_opcode *opcode = base->opcode;
7690 enum aarch64_op op = opcode->op;
7691 aarch64_opnd_info *operands = base->operands;
7692
7693 DEBUG_TRACE ("enter");
7694
7695 switch (opcode->iclass)
7696 {
7697 case testbranch:
7698 /* TBNZ Xn|Wn, #uimm6, label
7699 Test and Branch Not Zero: conditionally jumps to label if bit number
7700 uimm6 in register Xn is not zero. The bit number implies the width of
7701 the register, which may be written and should be disassembled as Wn if
7702 uimm is less than 32. */
7703 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7704 {
7705 if (operands[1].imm.value >= 32)
7706 {
7707 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7708 0, 31);
7709 return false;
7710 }
7711 operands[0].qualifier = AARCH64_OPND_QLF_X;
7712 }
7713 break;
7714 case loadlit:
7715 /* LDR Wt, label | =value
7716 As a convenience assemblers will typically permit the notation
7717 "=value" in conjunction with the pc-relative literal load instructions
7718 to automatically place an immediate value or symbolic address in a
7719 nearby literal pool and generate a hidden label which references it.
7720 ISREG has been set to 0 in the case of =value. */
7721 if (instr->gen_lit_pool
7722 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7723 {
7724 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7725 if (op == OP_LDRSW_LIT)
7726 size = 4;
7727 if (instr->reloc.exp.X_op != O_constant
7728 && instr->reloc.exp.X_op != O_big
7729 && instr->reloc.exp.X_op != O_symbol)
7730 {
7731 record_operand_error (opcode, 1,
7732 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7733 _("constant expression expected"));
7734 return false;
7735 }
7736 if (! add_to_lit_pool (&instr->reloc.exp, size))
7737 {
7738 record_operand_error (opcode, 1,
7739 AARCH64_OPDE_OTHER_ERROR,
7740 _("literal pool insertion failed"));
7741 return false;
7742 }
7743 }
7744 break;
7745 case log_shift:
7746 case bitfield:
7747 /* UXT[BHW] Wd, Wn
7748 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7749 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7750 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7751 A programmer-friendly assembler should accept a destination Xd in
7752 place of Wd, however that is not the preferred form for disassembly.
7753 */
7754 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7755 && operands[1].qualifier == AARCH64_OPND_QLF_W
7756 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7757 operands[0].qualifier = AARCH64_OPND_QLF_W;
7758 break;
7759
7760 case addsub_ext:
7761 {
7762 /* In the 64-bit form, the final register operand is written as Wm
7763 for all but the (possibly omitted) UXTX/LSL and SXTX
7764 operators.
7765 As a programmer-friendly assembler, we accept e.g.
7766 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7767 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7768 int idx = aarch64_operand_index (opcode->operands,
7769 AARCH64_OPND_Rm_EXT);
7770 gas_assert (idx == 1 || idx == 2);
7771 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7772 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7773 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7774 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7775 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7776 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7777 }
7778 break;
7779
7780 default:
7781 break;
7782 }
7783
7784 DEBUG_TRACE ("exit with SUCCESS");
7785 return true;
7786 }
7787
/* Check for loads and stores that will cause unpredictable behavior.
   Emits as_warn diagnostics (never errors) against the original
   assembly text STR.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  Bit 22 set in the
	 base encoding selects the load form of these pair instructions.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set: load-exclusive; clear: store-exclusive.  */
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 set selects the pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7883
7884 static void
7885 force_automatic_sequence_close (void)
7886 {
7887 struct aarch64_segment_info_type *tc_seg_info;
7888
7889 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7890 if (tc_seg_info->insn_sequence.instr)
7891 {
7892 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7893 _("previous `%s' sequence has not been closed"),
7894 tc_seg_info->insn_sequence.instr->opcode->name);
7895 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7896 }
7897 }
7898
7899 /* A wrapper function to interface with libopcodes on encoding and
7900 record the error message if there is any.
7901
7902 Return TRUE on success; otherwise return FALSE. */
7903
7904 static bool
7905 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7906 aarch64_insn *code)
7907 {
7908 aarch64_operand_error error_info;
7909 memset (&error_info, '\0', sizeof (error_info));
7910 error_info.kind = AARCH64_OPDE_NIL;
7911 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7912 && !error_info.non_fatal)
7913 return true;
7914
7915 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7916 record_operand_error_info (opcode, &error_info);
7917 return error_info.non_fatal;
7918 }
7919
#ifdef DEBUG_AARCH64
/* Debug helper: print every operand of OPCODE, using the operand's
   name when it has one and its description otherwise.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *name = aarch64_get_operand_name (opcode->operands[i]);

      aarch64_verbose ("\t\t opnd%d: %s", i,
		       name[0] != '\0'
		       ? name
		       : aarch64_get_operand_desc (opcode->operands[i]));
    }
}
#endif /* DEBUG_AARCH64 */
7935
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   The mnemonic is looked up and every opcode template sharing that
   mnemonic is tried in turn until one parses and encodes successfully;
   if none does, the most relevant recorded diagnostic is reported.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  DOT remembers the first '.' so that
     condition suffixes (e.g. "b.eq") can be found by opcode_lookup.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  /* An empty mnemonic is an error.  */
  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* "name .req reg" style register aliases are handled here, not as
     instructions.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Reset INST for this instruction, but preserve the condition that
     opcode_lookup deposited there.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; any stage may record diagnostics for
	 later reporting and make us fall through to the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8091
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every assembly line; forget any label seen on
   a previous line so md_assemble only re-aligns labels defined on the
   same line as the instruction.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8099
/* Called whenever a label SYM is defined.  Remember it so that
   md_assemble can adjust its address, and emit DWARF line info
   for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8107
/* Called when changing away from a section.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8114
8115 int
8116 aarch64_data_in_code (void)
8117 {
8118 if (startswith (input_line_pointer + 1, "data:"))
8119 {
8120 *input_line_pointer = '/';
8121 input_line_pointer += 5;
8122 *input_line_pointer = 0;
8123 return 1;
8124 }
8125
8126 return 0;
8127 }
8128
/* Canonicalize a symbol NAME in place by stripping a trailing "/data"
   marker (added for data-in-code symbols).  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Only strip the suffix when something precedes it.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8139 \f
8140 /* Table of all register names defined by default. The user can
8141 define additional names with .req. Note that all register names
8142 should appear in both upper and lowercase variants. Some registers
8143 also have mixed-case names. */
8144
8145 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8146 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8147 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8148 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8149 #define REGSET16(p,t) \
8150 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8151 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8152 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8153 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8154 #define REGSET16S(p,s,t) \
8155 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8156 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8157 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8158 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8159 #define REGSET31(p,t) \
8160 REGSET16(p, t), \
8161 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8162 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8163 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8164 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8165 #define REGSET(p,t) \
8166 REGSET31(p,t), REGNUM(p,31,t)
8167
8168 /* These go into aarch64_reg_hsh hash-table. */
8169 static const reg_entry reg_names[] = {
8170 /* Integer registers. */
8171 REGSET31 (x, R_64), REGSET31 (X, R_64),
8172 REGSET31 (w, R_32), REGSET31 (W, R_32),
8173
8174 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8175 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8176 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8177 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8178 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8179 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8180
8181 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8182 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8183
8184 /* Floating-point single precision registers. */
8185 REGSET (s, FP_S), REGSET (S, FP_S),
8186
8187 /* Floating-point double precision registers. */
8188 REGSET (d, FP_D), REGSET (D, FP_D),
8189
8190 /* Floating-point half precision registers. */
8191 REGSET (h, FP_H), REGSET (H, FP_H),
8192
8193 /* Floating-point byte precision registers. */
8194 REGSET (b, FP_B), REGSET (B, FP_B),
8195
8196 /* Floating-point quad precision registers. */
8197 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8198
8199 /* FP/SIMD registers. */
8200 REGSET (v, VN), REGSET (V, VN),
8201
8202 /* SVE vector registers. */
8203 REGSET (z, ZN), REGSET (Z, ZN),
8204
8205 /* SVE predicate registers. */
8206 REGSET16 (p, PN), REGSET16 (P, PN),
8207
8208 /* SME ZA tile registers. */
8209 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8210
8211 /* SME ZA tile registers (horizontal slice). */
8212 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8213
8214 /* SME ZA tile registers (vertical slice). */
8215 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8216 };
8217
8218 #undef REGDEF
8219 #undef REGDEF_ALIAS
8220 #undef REGNUM
8221 #undef REGSET16
8222 #undef REGSET31
8223 #undef REGSET
8224
/* Build the 4-bit NZCV immediate for CCMP/CCMN-style operands: an
   uppercase letter in the name sets the corresponding flag bit.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the nzcv flags operand, mapped to their mask.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8262 \f
8263 /* MD interface: bits in the object file. */
8264
8265 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8266 for use in the a.out file, and stores them in the array pointed to by buf.
8267 This knows about the endian-ness of the target machine and does
8268 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8269 2 (short) and 4 (long) Floating numbers are put out as a series of
8270 LITTLENUMS (shorts, here at least). */
8271
8272 void
8273 md_number_to_chars (char *buf, valueT val, int n)
8274 {
8275 if (target_big_endian)
8276 number_to_chars_bigendian (buf, val, n);
8277 else
8278 number_to_chars_littleendian (buf, val, n);
8279 }
8280
8281 /* MD interface: Sections. */
8282
8283 /* Estimate the size of a frag before relaxing. Assume everything fits in
8284 4 bytes. */
8285
8286 int
8287 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8288 {
8289 fragp->fr_var = 4;
8290 return 4;
8291 }
8292
/* Round up a section size to the appropriate boundary.  No extra
   padding is required on AArch64, so SIZE is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
8300
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must produce.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; these are
     zero-filled and folded into the fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is a single NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8358
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping state; nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($x/$d marker) implied by the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
#endif /* OBJ_ELF */
8405 \f
/* Initialize the DWARF-2 unwind information for this procedure.
   The initial CFA is defined as SP with offset 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8413
8414 /* Convert REGNAME to a DWARF-2 register number. */
8415
8416 int
8417 tc_aarch64_regname_to_dw2regnum (char *regname)
8418 {
8419 const reg_entry *reg = parse_reg (&regname);
8420 if (reg == NULL)
8421 return -1;
8422
8423 switch (reg->type)
8424 {
8425 case REG_TYPE_SP_32:
8426 case REG_TYPE_SP_64:
8427 case REG_TYPE_R_32:
8428 case REG_TYPE_R_64:
8429 return reg->number;
8430
8431 case REG_TYPE_FP_B:
8432 case REG_TYPE_FP_H:
8433 case REG_TYPE_FP_S:
8434 case REG_TYPE_FP_D:
8435 case REG_TYPE_FP_Q:
8436 return reg->number + 64;
8437
8438 default:
8439 break;
8440 }
8441 return -1;
8442 }
8443
8444 /* Implement DWARF2_ADDR_SIZE. */
8445
8446 int
8447 aarch64_dwarf2_addr_size (void)
8448 {
8449 if (ilp32_p)
8450 return 4;
8451 else if (llp64_p)
8452 return 8;
8453 return bfd_arch_bits_per_address (stdoutput) / 8;
8454 }
8455
8456 /* MD interface: Symbol and relocation handling. */
8457
8458 /* Return the address within the segment that a PC-relative fixup is
8459 relative to. For AArch64 PC-relative fixups applied to instructions
8460 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8461
8462 long
8463 md_pcrel_from_section (fixS * fixP, segT seg)
8464 {
8465 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8466
8467 /* If this is pc-relative and we are going to emit a relocation
8468 then we just want to put out any pipeline compensation that the linker
8469 will need. Otherwise we want to use the calculated base. */
8470 if (fixP->fx_pcrel
8471 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8472 || aarch64_force_relocation (fixP)))
8473 base = 0;
8474
8475 /* AArch64 should be consistent for all pc-relative relocations. */
8476 return base + AARCH64_PCREL_OFFSET;
8477 }
8478
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.
   Returns the symbol to use, or 0 when NAME gets no default.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8504
8505 /* Return non-zero if the indicated VALUE has overflowed the maximum
8506 range expressible by a unsigned number with the indicated number of
8507 BITS. */
8508
8509 static bool
8510 unsigned_overflow (valueT value, unsigned bits)
8511 {
8512 valueT lim;
8513 if (bits >= sizeof (valueT) * 8)
8514 return false;
8515 lim = (valueT) 1 << bits;
8516 return (value >= lim);
8517 }
8518
8519
8520 /* Return non-zero if the indicated VALUE has overflowed the maximum
8521 range expressible by an signed number with the indicated number of
8522 BITS. */
8523
8524 static bool
8525 signed_overflow (offsetT value, unsigned bits)
8526 {
8527 offsetT lim;
8528 if (bits >= sizeof (offsetT) * 8)
8529 return false;
8530 lim = (offsetT) 1 << (bits - 1);
8531 return (value < -lim || value >= lim);
8532 }
8533
/* Given an instruction in *INSTR, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (positive-offset) opcode to its unscaled twin;
     anything without one yields OP_NIL and is rejected below.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8596
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  /* MOVZ/MOVN write to SP-capable encodings?  No: the wide aliases use
     the general register, so they are skipped when Rd is SP.  */
  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* The ORR alias cannot target the zero register.  */
  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8657
8658 /* An instruction operand which is immediate related may have symbol used
8659 in the assembly, e.g.
8660
8661 mov w0, u32
8662 .set u32, 0x00ffff00
8663
8664 At the time when the assembly instruction is parsed, a referenced symbol,
8665 like 'u32' in the above example may not have been seen; a fixS is created
8666 in such a case and is handled here after symbols have been resolved.
8667 Instruction is fixed up with VALUE using the information in *FIXP plus
8668 extra information in FLAGS.
8669
8670 This function is called by md_apply_fix to fix up instructions that need
8671 a fix-up described above but does not involve any linker-time relocation. */
8672
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate of SVC/HVC/SMC/BRK/UDF etc.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction through the
	 opcode table, since logical immediates cannot be patched in
	 directly.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If a scaled-offset encoding does not fit, fall
	 back to the equivalent unscaled (LDUR/STUR-style) form when the
	 opcode allows it.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8836
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being patched inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Extra fix-up flags stashed by the code that created the fix.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* LDR (literal): 19-bit signed, word-aligned offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* ADR: 21-bit signed byte offset.  */
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B.cond: 19-bit signed, word-aligned offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* TBZ/TBNZ: 14-bit signed, word-aligned offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B/BL: 26-bit signed, word-aligned offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK group relocations: select the 16-bit group via
       SCALE, then share the insertion code at movw_common.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the fix for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch in the instruction stream.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9224
9225 /* Translate internal representation of relocation info to BFD target
9226 format. */
9227
9228 arelent *
9229 tc_gen_reloc (asection * section, fixS * fixp)
9230 {
9231 arelent *reloc;
9232 bfd_reloc_code_real_type code;
9233
9234 reloc = XNEW (arelent);
9235
9236 reloc->sym_ptr_ptr = XNEW (asymbol *);
9237 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9238 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9239
9240 if (fixp->fx_pcrel)
9241 {
9242 if (section->use_rela_p)
9243 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9244 else
9245 fixp->fx_offset = reloc->address;
9246 }
9247 reloc->addend = fixp->fx_offset;
9248
9249 code = fixp->fx_r_type;
9250 switch (code)
9251 {
9252 case BFD_RELOC_16:
9253 if (fixp->fx_pcrel)
9254 code = BFD_RELOC_16_PCREL;
9255 break;
9256
9257 case BFD_RELOC_32:
9258 if (fixp->fx_pcrel)
9259 code = BFD_RELOC_32_PCREL;
9260 break;
9261
9262 case BFD_RELOC_64:
9263 if (fixp->fx_pcrel)
9264 code = BFD_RELOC_64_PCREL;
9265 break;
9266
9267 default:
9268 break;
9269 }
9270
9271 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9272 if (reloc->howto == NULL)
9273 {
9274 as_bad_where (fixp->fx_file, fixp->fx_line,
9275 _
9276 ("cannot represent %s relocation in this object file format"),
9277 bfd_get_reloc_code_name (code));
9278 return NULL;
9279 }
9280
9281 return reloc;
9282 }
9283
9284 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9285
9286 void
9287 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9288 {
9289 bfd_reloc_code_real_type type;
9290 int pcrel = 0;
9291
9292 /* Pick a reloc.
9293 FIXME: @@ Should look at CPU word size. */
9294 switch (size)
9295 {
9296 case 1:
9297 type = BFD_RELOC_8;
9298 break;
9299 case 2:
9300 type = BFD_RELOC_16;
9301 break;
9302 case 4:
9303 type = BFD_RELOC_32;
9304 break;
9305 case 8:
9306 type = BFD_RELOC_64;
9307 break;
9308 default:
9309 as_bad (_("cannot do %u-byte relocation"), size);
9310 type = BFD_RELOC_UNUSED;
9311 break;
9312 }
9313
9314 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9315 }
9316
9317 /* Implement md_after_parse_args. This is the earliest time we need to decide
9318 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9319
9320 void
9321 aarch64_after_parse_args (void)
9322 {
9323 if (aarch64_abi != AARCH64_ABI_NONE)
9324 return;
9325
9326 #ifdef OBJ_ELF
9327 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9328 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9329 aarch64_abi = AARCH64_ABI_ILP32;
9330 else
9331 aarch64_abi = AARCH64_ABI_LP64;
9332 #else
9333 aarch64_abi = AARCH64_ABI_LLP64;
9334 #endif
9335 }
9336
9337 #ifdef OBJ_ELF
9338 const char *
9339 elf64_aarch64_target_format (void)
9340 {
9341 #ifdef TE_CLOUDABI
9342 /* FIXME: What to do for ilp32_p ? */
9343 if (target_big_endian)
9344 return "elf64-bigaarch64-cloudabi";
9345 else
9346 return "elf64-littleaarch64-cloudabi";
9347 #else
9348 if (target_big_endian)
9349 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9350 else
9351 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9352 #endif
9353 }
9354
/* Per-symbol output hook: defer entirely to the generic ELF symbol
   frobbing (which may set *PUNTP — see elf_frob_symbol).  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9360 #elif defined OBJ_COFF
/* Return the BFD target name used for PE/COFF AArch64 output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9366 #endif
9367
9368 /* MD interface: Finalization. */
9369
9370 /* A good place to do this, although this was probably not intended
9371 for this kind of use. We need to dump the literal pool before
9372 references are made to a null symbol pointer. */
9373
9374 void
9375 aarch64_cleanup (void)
9376 {
9377 literal_pool *pool;
9378
9379 for (pool = list_of_pools; pool; pool = pool->next)
9380 {
9381 /* Put it at the end of the relevant section. */
9382 subseg_set (pool->section, pool->sub_section);
9383 s_ltorg (0);
9384 }
9385 }
9386
9387 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without a frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 walk forward to decide whether a later mapping symbol makes it
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9451 #endif
9452
/* Adjust the symbol table.  A no-op for non-ELF output.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9465
/* Insert VALUE under KEY into TABLE.  The final 0 is the 'replace'
   argument of str_hash_insert, so an already-present entry is
   presumably left untouched rather than overwritten (see gas/hash.h);
   despite the name, no diagnostic is produced here on a duplicate.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9471
/* Insert the system-register entry VALUE under name KEY into TABLE,
   asserting that the name fits in AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9478
9479 static void
9480 fill_instruction_hash_table (void)
9481 {
9482 const aarch64_opcode *opcode = aarch64_opcode_table;
9483
9484 while (opcode->name != NULL)
9485 {
9486 templates *templ, *new_templ;
9487 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9488
9489 new_templ = XNEW (templates);
9490 new_templ->opcode = opcode;
9491 new_templ->next = NULL;
9492
9493 if (!templ)
9494 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9495 else
9496 {
9497 new_templ->next = templ->next;
9498 templ->next = new_templ;
9499 }
9500 ++opcode;
9501 }
9502 }
9503
9504 static inline void
9505 convert_to_upper (char *dst, const char *src, size_t num)
9506 {
9507 unsigned int i;
9508 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9509 *dst = TOUPPER (*src);
9510 *dst = '\0';
9511 }
9512
9513 /* Assume STR point to a lower-case string, allocate, convert and return
9514 the corresponding upper-case string. */
9515 static inline const char*
9516 get_upper_str (const char *str)
9517 {
9518 char *ret;
9519 size_t len = strlen (str);
9520 ret = XNEWVEC (char, len + 1);
9521 convert_to_upper (ret, str, len);
9522 return ret;
9523 }
9524
/* MD interface: Initialization.  */

/* Build every keyword hash table used by the parser, then settle the
   active CPU feature set and record the BFD machine number.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* One hash table per keyword class recognised during parsing.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  -mcpu wins
     over -march; fall back to the built-in default otherwise.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9690
/* Command line processing.  */

/* Target-specific short options; "m:" covers the -m<...> family.  */
const char *md_shortopts = "m:";

/* Define only the endianness option codes that this build can honour:
   both for a bi-endian assembler, otherwise just the configured
   default.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long-form options (-EB / -EL endianness selectors); the handler is
   not visible in this part of the file.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9717
/* Table entry describing a simple flag-setting command-line option.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Generic on/off command-line options for this target; the VAR/VALUE
   pairs suggest each option stores VALUE into *VAR when matched (the
   consumer is not visible in this chunk).  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9740
/* Entry describing a CPU that can be selected with -mcpu= or the .cpu
   directive, mapping its name to the feature set it implements.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9749
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  /* "all" must stay first: s_aarch64_cpu deliberately starts its scan
     at aarch64_cpus + 1 to skip it.  */
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9909
/* Entry mapping an architecture name (-march= or .arch) to the feature
   set it enables.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};
9915
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  /* "all" must stay first: s_aarch64_arch starts scanning at
     aarch64_archs + 1 to skip it.  */
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9936
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9944
/* Architectural extensions accepted after '+' in -mcpu=/-march= strings
   and by the .arch_extension directive.  REQUIRE lists the features an
   extension depends on; the transitive closures are computed by
   aarch64_feature_enable_set / aarch64_feature_disable_set.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10046
/* Entry for a "-m<option><value>" style option whose argument is decoded
   by FUNC (matched by prefix in md_parse_option).  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10054
10055 /* Transitive closure of features depending on set. */
10056 static aarch64_feature_set
10057 aarch64_feature_disable_set (aarch64_feature_set set)
10058 {
10059 const struct aarch64_option_cpu_value_table *opt;
10060 aarch64_feature_set prev = 0;
10061
10062 while (prev != set) {
10063 prev = set;
10064 for (opt = aarch64_features; opt->name != NULL; opt++)
10065 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10066 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10067 }
10068 return set;
10069 }
10070
10071 /* Transitive closure of dependencies of set. */
10072 static aarch64_feature_set
10073 aarch64_feature_enable_set (aarch64_feature_set set)
10074 {
10075 const struct aarch64_option_cpu_value_table *opt;
10076 aarch64_feature_set prev = 0;
10077
10078 while (prev != set) {
10079 prev = set;
10080 for (opt = aarch64_features; opt->name != NULL; opt++)
10081 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10082 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10083 }
10084 return set;
10085 }
10086
10087 static int
10088 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10089 bool ext_only)
10090 {
10091 /* We insist on extensions being added before being removed. We achieve
10092 this by using the ADDING_VALUE variable to indicate whether we are
10093 adding an extension (1) or removing it (0) and only allowing it to
10094 change in the order -1 -> 1 -> 0. */
10095 int adding_value = -1;
10096 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10097
10098 /* Copy the feature set, so that we can modify it. */
10099 *ext_set = **opt_p;
10100 *opt_p = ext_set;
10101
10102 while (str != NULL && *str != 0)
10103 {
10104 const struct aarch64_option_cpu_value_table *opt;
10105 const char *ext = NULL;
10106 int optlen;
10107
10108 if (!ext_only)
10109 {
10110 if (*str != '+')
10111 {
10112 as_bad (_("invalid architectural extension"));
10113 return 0;
10114 }
10115
10116 ext = strchr (++str, '+');
10117 }
10118
10119 if (ext != NULL)
10120 optlen = ext - str;
10121 else
10122 optlen = strlen (str);
10123
10124 if (optlen >= 2 && startswith (str, "no"))
10125 {
10126 if (adding_value != 0)
10127 adding_value = 0;
10128 optlen -= 2;
10129 str += 2;
10130 }
10131 else if (optlen > 0)
10132 {
10133 if (adding_value == -1)
10134 adding_value = 1;
10135 else if (adding_value != 1)
10136 {
10137 as_bad (_("must specify extensions to add before specifying "
10138 "those to remove"));
10139 return false;
10140 }
10141 }
10142
10143 if (optlen == 0)
10144 {
10145 as_bad (_("missing architectural extension"));
10146 return 0;
10147 }
10148
10149 gas_assert (adding_value != -1);
10150
10151 for (opt = aarch64_features; opt->name != NULL; opt++)
10152 if (strncmp (opt->name, str, optlen) == 0)
10153 {
10154 aarch64_feature_set set;
10155
10156 /* Add or remove the extension. */
10157 if (adding_value)
10158 {
10159 set = aarch64_feature_enable_set (opt->value);
10160 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10161 }
10162 else
10163 {
10164 set = aarch64_feature_disable_set (opt->value);
10165 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10166 }
10167 break;
10168 }
10169
10170 if (opt->name == NULL)
10171 {
10172 as_bad (_("unknown architectural extension `%s'"), str);
10173 return 0;
10174 }
10175
10176 str = ext;
10177 };
10178
10179 return 1;
10180 }
10181
10182 static int
10183 aarch64_parse_cpu (const char *str)
10184 {
10185 const struct aarch64_cpu_option_table *opt;
10186 const char *ext = strchr (str, '+');
10187 size_t optlen;
10188
10189 if (ext != NULL)
10190 optlen = ext - str;
10191 else
10192 optlen = strlen (str);
10193
10194 if (optlen == 0)
10195 {
10196 as_bad (_("missing cpu name `%s'"), str);
10197 return 0;
10198 }
10199
10200 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10201 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10202 {
10203 mcpu_cpu_opt = &opt->value;
10204 if (ext != NULL)
10205 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10206
10207 return 1;
10208 }
10209
10210 as_bad (_("unknown cpu `%s'"), str);
10211 return 0;
10212 }
10213
10214 static int
10215 aarch64_parse_arch (const char *str)
10216 {
10217 const struct aarch64_arch_option_table *opt;
10218 const char *ext = strchr (str, '+');
10219 size_t optlen;
10220
10221 if (ext != NULL)
10222 optlen = ext - str;
10223 else
10224 optlen = strlen (str);
10225
10226 if (optlen == 0)
10227 {
10228 as_bad (_("missing architecture name `%s'"), str);
10229 return 0;
10230 }
10231
10232 for (opt = aarch64_archs; opt->name != NULL; opt++)
10233 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10234 {
10235 march_cpu_opt = &opt->value;
10236 if (ext != NULL)
10237 return aarch64_parse_features (ext, &march_cpu_opt, false);
10238
10239 return 1;
10240 }
10241
10242 as_bad (_("unknown architecture `%s'\n"), str);
10243 return 0;
10244 }
10245
/* ABIs.  */
/* Entry mapping an ABI name accepted by -mabi= to its enum value.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};
10252
/* ABI names accepted by -mabi=.  ILP32 and LP64 are available only for
   ELF output; non-ELF configurations accept LLP64 instead.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10261
10262 static int
10263 aarch64_parse_abi (const char *str)
10264 {
10265 unsigned int i;
10266
10267 if (str[0] == '\0')
10268 {
10269 as_bad (_("missing abi name `%s'"), str);
10270 return 0;
10271 }
10272
10273 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10274 if (strcmp (str, aarch64_abis[i].name) == 0)
10275 {
10276 aarch64_abi = aarch64_abis[i].value;
10277 return 1;
10278 }
10279
10280 as_bad (_("unknown abi `%s'\n"), str);
10281 return 0;
10282 }
10283
/* Argument-taking options dispatched by md_parse_option; the matched
   prefix selects the decoder function.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10293
/* GAS hook: handle target-specific command-line option C with argument
   ARG.  -EB/-EL set the output endianness directly; anything else is
   looked up first in the boolean table AARCH64_OPTS, then in the
   argument-taking table AARCH64_LONG_OPTS.  Return 1 if the option was
   consumed, 0 if it is unknown to this backend.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Boolean options: exact match on "-<first char><rest>".  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Argument-taking options: prefix match, remainder goes to the
	 decoder function.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10360
10361 void
10362 md_show_usage (FILE * fp)
10363 {
10364 struct aarch64_option_table *opt;
10365 struct aarch64_long_option_table *lopt;
10366
10367 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10368
10369 for (opt = aarch64_opts; opt->option != NULL; opt++)
10370 if (opt->help != NULL)
10371 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10372
10373 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10374 if (lopt->help != NULL)
10375 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10376
10377 #ifdef OPTION_EB
10378 fprintf (fp, _("\
10379 -EB assemble code for a big-endian cpu\n"));
10380 #endif
10381
10382 #ifdef OPTION_EL
10383 fprintf (fp, _("\
10384 -EL assemble code for a little-endian cpu\n"));
10385 #endif
10386 }
10387
10388 /* Parse a .cpu directive. */
10389
10390 static void
10391 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10392 {
10393 const struct aarch64_cpu_option_table *opt;
10394 char saved_char;
10395 char *name;
10396 char *ext;
10397 size_t optlen;
10398
10399 name = input_line_pointer;
10400 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10401 saved_char = *input_line_pointer;
10402 *input_line_pointer = 0;
10403
10404 ext = strchr (name, '+');
10405
10406 if (ext != NULL)
10407 optlen = ext - name;
10408 else
10409 optlen = strlen (name);
10410
10411 /* Skip the first "all" entry. */
10412 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10413 if (strlen (opt->name) == optlen
10414 && strncmp (name, opt->name, optlen) == 0)
10415 {
10416 mcpu_cpu_opt = &opt->value;
10417 if (ext != NULL)
10418 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10419 return;
10420
10421 cpu_variant = *mcpu_cpu_opt;
10422
10423 *input_line_pointer = saved_char;
10424 demand_empty_rest_of_line ();
10425 return;
10426 }
10427 as_bad (_("unknown cpu `%s'"), name);
10428 *input_line_pointer = saved_char;
10429 ignore_rest_of_line ();
10430 }
10431
10432
10433 /* Parse a .arch directive. */
10434
10435 static void
10436 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10437 {
10438 const struct aarch64_arch_option_table *opt;
10439 char saved_char;
10440 char *name;
10441 char *ext;
10442 size_t optlen;
10443
10444 name = input_line_pointer;
10445 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10446 saved_char = *input_line_pointer;
10447 *input_line_pointer = 0;
10448
10449 ext = strchr (name, '+');
10450
10451 if (ext != NULL)
10452 optlen = ext - name;
10453 else
10454 optlen = strlen (name);
10455
10456 /* Skip the first "all" entry. */
10457 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10458 if (strlen (opt->name) == optlen
10459 && strncmp (name, opt->name, optlen) == 0)
10460 {
10461 mcpu_cpu_opt = &opt->value;
10462 if (ext != NULL)
10463 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10464 return;
10465
10466 cpu_variant = *mcpu_cpu_opt;
10467
10468 *input_line_pointer = saved_char;
10469 demand_empty_rest_of_line ();
10470 return;
10471 }
10472
10473 as_bad (_("unknown architecture `%s'\n"), name);
10474 *input_line_pointer = saved_char;
10475 ignore_rest_of_line ();
10476 }
10477
10478 /* Parse a .arch_extension directive. */
10479
10480 static void
10481 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10482 {
10483 char saved_char;
10484 char *ext = input_line_pointer;
10485
10486 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10487 saved_char = *input_line_pointer;
10488 *input_line_pointer = 0;
10489
10490 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10491 return;
10492
10493 cpu_variant = *mcpu_cpu_opt;
10494
10495 *input_line_pointer = saved_char;
10496 demand_empty_rest_of_line ();
10497 }
10498
/* Copy symbol information.  */

/* GAS hook: propagate the AArch64-specific symbol flags from SRC to
   DEST (e.g. when one symbol is set equal to another).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10506
10507 #ifdef OBJ_ELF
10508 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10509 This is needed so AArch64 specific st_other values can be independently
10510 specified for an IFUNC resolver (that is called by the dynamic linker)
10511 and the symbol it resolves (aliased to the resolver). In particular,
10512 if a function symbol has special st_other value set via directives,
10513 then attaching an IFUNC resolver to that symbol should not override
10514 the st_other setting. Requiring the directive on the IFUNC resolver
10515 symbol would be unexpected and problematic in C code, where the two
10516 symbols appear as two independent function declarations. */
10517
10518 void
10519 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10520 {
10521 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10522 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10523 /* If size is unset, copy size from src. Because we don't track whether
10524 .size has been used, we can't differentiate .size dest, 0 from the case
10525 where dest's size is unset. */
10526 if (!destelf->size && S_GET_SIZE (dest) == 0)
10527 {
10528 if (srcelf->size)
10529 {
10530 destelf->size = XNEW (expressionS);
10531 *destelf->size = *srcelf->size;
10532 }
10533 S_SET_SIZE (dest, S_GET_SIZE (src));
10534 }
10535 }
10536 #endif