aarch64: Rework parse_typed_reg interface
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* SME horizontal or vertical slice indicator, encoded in "V".
118 Values:
119 0 - Horizontal
120 1 - vertical
121 */
122 enum sme_hv_slice
123 {
124 HV_horizontal = 0,
125 HV_vertical = 1
126 };
127
128 /* Bits for DEFINED field in vector_type_el. */
129 #define NTA_HASTYPE 1
130 #define NTA_HASINDEX 2
131 #define NTA_HASVARWIDTH 4
132
133 struct vector_type_el
134 {
135 enum vector_el_type type;
136 unsigned char defined;
137 unsigned width;
138 int64_t index;
139 };
140
141 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
142
143 struct reloc
144 {
145 bfd_reloc_code_real_type type;
146 expressionS exp;
147 int pc_rel;
148 enum aarch64_opnd opnd;
149 uint32_t flags;
150 unsigned need_libopcodes_p : 1;
151 };
152
153 struct aarch64_instruction
154 {
155 /* libopcodes structure for instruction intermediate representation. */
156 aarch64_inst base;
157 /* Record assembly errors found during the parsing. */
158 aarch64_operand_error parsing_error;
159 /* The condition that appears in the assembly line. */
160 int cond;
161 /* Relocation information (including the GAS internal fixup). */
162 struct reloc reloc;
163 /* Need to generate an immediate in the literal pool. */
164 unsigned gen_lit_pool : 1;
165 };
166
167 typedef struct aarch64_instruction aarch64_instruction;
168
169 static aarch64_instruction inst;
170
171 static bool parse_operands (char *, const aarch64_opcode *);
172 static bool programmer_friendly_fixup (aarch64_instruction *);
173
174 /* Diagnostics inline function utilities.
175
176 These are lightweight utilities which should only be called by parse_operands
177 and other parsers. GAS processes each assembly line by parsing it against
178 instruction template(s), in the case of multiple templates (for the same
179 mnemonic name), those templates are tried one by one until one succeeds or
180 all fail. An assembly line may fail a few templates before being
181 successfully parsed; an error saved here in most cases is not a user error
182 but an error indicating the current template is not the right template.
183 Therefore it is very important that errors can be saved at a low cost during
184 the parsing; we don't want to slow down the whole parsing by recording
185 non-user errors in detail.
186
187 Remember that the objective is to help GAS pick up the most appropriate
188 error message in the case of multiple templates, e.g. FMOV which has 8
189 templates. */
190
191 static inline void
192 clear_error (void)
193 {
194 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
195 inst.parsing_error.kind = AARCH64_OPDE_NIL;
196 }
197
198 static inline bool
199 error_p (void)
200 {
201 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
202 }
203
204 static inline void
205 set_error (enum aarch64_operand_error_kind kind, const char *error)
206 {
207 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
208 inst.parsing_error.index = -1;
209 inst.parsing_error.kind = kind;
210 inst.parsing_error.error = error;
211 }
212
/* Record ERROR as a recoverable diagnostic: the operand is wrong for
   this template, but matching can continue with other templates.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
218
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message; passing a NULL message string to set_error signals
   this.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
226
/* Unconditionally record ERROR as a syntax error, overwriting any
   previously saved diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
232
233 static inline void
234 set_first_syntax_error (const char *error)
235 {
236 if (! error_p ())
237 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
238 }
239
/* Record ERROR as a fatal syntax error: one that cannot be fixed by
   trying the remaining templates for this mnemonic.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
245 \f
246 /* Return value for certain parsers when the parsing fails; those parsers
247 return the information of the parsed result, e.g. register number, on
248 success. */
249 #define PARSE_FAIL -1
250
251 /* This is an invalid condition code that means no conditional field is
252 present. */
253 #define COND_ALWAYS 0x10
254
255 typedef struct
256 {
257 const char *template;
258 uint32_t value;
259 } asm_nzcv;
260
261 struct reloc_entry
262 {
263 char *name;
264 bfd_reloc_code_real_type reloc;
265 };
266
267 /* Macros to define the register types and masks for the purpose
268 of parsing. */
269
270 #undef AARCH64_REG_TYPES
271 #define AARCH64_REG_TYPES \
272 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
273 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
274 BASIC_REG_TYPE(SP_32) /* wsp */ \
275 BASIC_REG_TYPE(SP_64) /* sp */ \
276 BASIC_REG_TYPE(Z_32) /* wzr */ \
277 BASIC_REG_TYPE(Z_64) /* xzr */ \
278 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
279 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
280 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
281 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
282 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
283 BASIC_REG_TYPE(VN) /* v[0-31] */ \
284 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
285 BASIC_REG_TYPE(PN) /* p[0-15] */ \
286 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
287 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
288 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
289 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
290 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
291 /* Typecheck: same, plus SVE registers. */ \
292 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
293 | REG_TYPE(ZN)) \
294 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
295 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
296 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
297 /* Typecheck: same, plus SVE registers. */ \
298 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
300 | REG_TYPE(ZN)) \
301 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
302 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
303 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
304 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
305 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
306 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
307 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
308 /* Typecheck: any [BHSDQ]P FP. */ \
309 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
310 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
311 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
312 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
316 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
317 be used for SVE instructions, since Zn and Pn are valid symbols \
318 in other contexts. */ \
319 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
320 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
322 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
323 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
324 | REG_TYPE(ZN) | REG_TYPE(PN)) \
325 /* Any integer register; used for error messages only. */ \
326 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
327 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
328 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
329 /* A horizontal or vertical slice of a ZA tile. */ \
330 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
331 /* Pseudo type to mark the end of the enumerator sequence. */ \
332 BASIC_REG_TYPE(MAX)
333
334 #undef BASIC_REG_TYPE
335 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
336 #undef MULTI_REG_TYPE
337 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
338
339 /* Register type enumerators. */
340 typedef enum aarch64_reg_type_
341 {
342 /* A list of REG_TYPE_*. */
343 AARCH64_REG_TYPES
344 } aarch64_reg_type;
345
346 #undef BASIC_REG_TYPE
347 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
348 #undef REG_TYPE
349 #define REG_TYPE(T) (1 << REG_TYPE_##T)
350 #undef MULTI_REG_TYPE
351 #define MULTI_REG_TYPE(T,V) V,
352
353 /* Structure for a hash table entry for a register. */
354 typedef struct
355 {
356 const char *name;
357 unsigned char number;
358 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
359 unsigned char builtin;
360 } reg_entry;
361
362 /* Values indexed by aarch64_reg_type to assist the type checking. */
363 static const unsigned reg_type_masks[] =
364 {
365 AARCH64_REG_TYPES
366 };
367
368 #undef BASIC_REG_TYPE
369 #undef REG_TYPE
370 #undef MULTI_REG_TYPE
371 #undef AARCH64_REG_TYPES
372
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  */
/* Return the (untranslated) diagnostic template to use when a register
   of type REG_TYPE was expected but not found.  Aborts assembly for
   register types that have no associated diagnostic.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* No diagnostic is defined for this register type; treat it as an
	 internal error.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
450
451 /* Some well known registers that we refer to directly elsewhere. */
452 #define REG_SP 31
453 #define REG_ZR 31
454
455 /* Instructions take 4 bytes in the object file. */
456 #define INSN_SIZE 4
457
458 static htab_t aarch64_ops_hsh;
459 static htab_t aarch64_cond_hsh;
460 static htab_t aarch64_shift_hsh;
461 static htab_t aarch64_sys_regs_hsh;
462 static htab_t aarch64_pstatefield_hsh;
463 static htab_t aarch64_sys_regs_ic_hsh;
464 static htab_t aarch64_sys_regs_dc_hsh;
465 static htab_t aarch64_sys_regs_at_hsh;
466 static htab_t aarch64_sys_regs_tlbi_hsh;
467 static htab_t aarch64_sys_regs_sr_hsh;
468 static htab_t aarch64_reg_hsh;
469 static htab_t aarch64_barrier_opt_hsh;
470 static htab_t aarch64_nzcv_hsh;
471 static htab_t aarch64_pldop_hsh;
472 static htab_t aarch64_hint_opt_hsh;
473
474 /* Stuff needed to resolve the label ambiguity
475 As:
476 ...
477 label: <insn>
478 may differ from:
479 ...
480 label:
481 <insn> */
482
483 static symbolS *last_label_seen;
484
485 /* Literal pool structure. Held on a per-section
486 and per-sub-section basis. */
487
488 #define MAX_LITERAL_POOL_SIZE 1024
489 typedef struct literal_expression
490 {
491 expressionS exp;
492 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
493 LITTLENUM_TYPE * bignum;
494 } literal_expression;
495
496 typedef struct literal_pool
497 {
498 literal_expression literals[MAX_LITERAL_POOL_SIZE];
499 unsigned int next_free_entry;
500 unsigned int id;
501 symbolS *symbol;
502 segT section;
503 subsegT sub_section;
504 int size;
505 struct literal_pool *next;
506 } literal_pool;
507
508 /* Pointer to a linked list of literal pools. */
509 static literal_pool *list_of_pools = NULL;
510 \f
511 /* Pure syntax. */
512
513 /* This array holds the chars that always start a comment. If the
514 pre-processor is disabled, these aren't very useful. */
515 const char comment_chars[] = "";
516
517 /* This array holds the chars that only start a comment at the beginning of
518 a line. If the line seems to have the form '# 123 filename'
519 .line and .file directives will appear in the pre-processed output. */
520 /* Note that input_file.c hand checks for '#' at the beginning of the
521 first line of the input file. This is because the compiler outputs
522 #NO_APP at the beginning of its output. */
523 /* Also note that comments like this one will always work. */
524 const char line_comment_chars[] = "#";
525
526 const char line_separator_chars[] = ";";
527
528 /* Chars that can be used to separate mant
529 from exp in floating point numbers. */
530 const char EXP_CHARS[] = "eE";
531
532 /* Chars that mean this number is a floating point constant. */
533 /* As in 0f12.456 */
534 /* or 0d1.2345e12 */
535
536 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
537
538 /* Prefix character that indicates the start of an immediate value. */
539 #define is_immediate_prefix(C) ((C) == '#')
540
541 /* Separator character handling. */
542
543 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
544
/* If *STR currently points at character C, consume it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
556
557 #define skip_past_comma(str) skip_past_char (str, ',')
558
559 /* Arithmetic expressions (possibly involving symbols). */
560
561 static bool in_aarch64_get_expression = false;
562
563 /* Third argument to aarch64_get_expression. */
564 #define GE_NO_PREFIX false
565 #define GE_OPT_PREFIX true
566
567 /* Fourth argument to aarch64_get_expression. */
568 #define ALLOW_ABSENT false
569 #define REJECT_ABSENT true
570
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */
577
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#' immediate marker.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () parses from input_line_pointer; redirect it to *STR
     for the duration of the call and restore it on every exit path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#' prefix promises an immediate, so a malformed expression
	 after it cannot be rescued by trying another template.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
637
638 /* Turn a string in input_line_pointer into a floating point constant
639 of type TYPE, and store the appropriate bytes in *LITP. The number
640 of LITTLENUMS emitted is stored in *SIZEP. An error message is
641 returned, or NULL on OK. */
642
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* AArch64 uses IEEE formats throughout; defer to the generic helper,
     honouring the current endianness of the output.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
648
649 /* We handle all bad expressions here, so that we can report the faulty
650 instruction in the error message. */
void
md_operand (expressionS * exp)
{
  /* Only flag the expression as illegal when we were called back from
     aarch64_get_expression; other callers handle failures themselves.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
657
658 /* Immediate values. */
659
660 /* Errors may be set multiple times during parsing or bit encoding
661 (particularly in the Neon bits), but usually the earliest error which is set
662 will be the most meaningful. Avoid overwriting it with later (cascading)
663 errors by calling this function. */
664
/* Record ERROR as a syntax error unless a diagnostic has already been
   saved; keeping the earliest error preserves the most meaningful
   message (see the comment above).  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
671
672 /* Similar to first_error, but this function accepts formatted error
673 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  /* Upper bound on the formatted message length, including the NUL.  */
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only format and record the message when no earlier diagnostic has
     been saved; mirrors first_error.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Truncation here would mean SIZE is too small for the message.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
696
697 /* Internal helper routine converting a vector_type_el structure *VECTYPE
698 to a corresponding operand qualifier. */
699
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for the narrowest vector arrangement of each element
     type, indexed by vector_el_type like ELE_SIZE.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predication suffixes map directly to their own qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  The S_B..S_Q qualifiers are laid out
	 in the same order as vector_el_type, so the type is an offset.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 128-bit, 64-bit and (4B/2H) 32-bit arrangements exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
771
772 /* Register parsing. */
773
774 /* Generic register parser which is called by other specialized
775 register parsers.
776 CCP points to what should be the beginning of a register name.
777 If it is indeed a valid register name, advance CCP over it and
778 return the reg_entry structure; otherwise return NULL.
779 It does not issue diagnostics. */
780
781 static reg_entry *
782 parse_reg (char **ccp)
783 {
784 char *start = *ccp;
785 char *p;
786 reg_entry *reg;
787
788 #ifdef REGISTER_PREFIX
789 if (*start != REGISTER_PREFIX)
790 return NULL;
791 start++;
792 #endif
793
794 p = start;
795 if (!ISALPHA (*p) || !is_name_beginner (*p))
796 return NULL;
797
798 do
799 p++;
800 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
801
802 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
803
804 if (!reg)
805 return NULL;
806
807 *ccp = p;
808 return reg;
809 }
810
811 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
812 return FALSE. */
813 static bool
814 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
815 {
816 return (reg_type_masks[type] & (1 << reg->type)) != 0;
817 }
818
819 /* Try to parse a base or offset register. Allow SVE base and offset
820 registers if REG_TYPE includes SVE registers. Return the register
821 entry on success, setting *QUALIFIER to the register qualifier.
822 Return null otherwise.
823
824 Note that this function does not issue any diagnostics. */
825
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit integer registers, including WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit integer registers, including SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE vector base/offset register is only accepted when
	 REG_TYPE allows it, and it must carry an explicit ".s" or ".d"
	 element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character size suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
876
877 /* Try to parse a base or offset register. Return the register entry
878 on success, setting *QUALIFIER to the register qualifier. Return null
879 otherwise.
880
881 Note that this function does not issue any diagnostics. */
882
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32-bit or 64-bit integer register, including SP and ZR.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
888
889 /* Parse the qualifier of a vector register or vector element of type
890 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
891 succeeds; otherwise return FALSE.
892
893 Accept only one occurrence of:
894 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
895 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers never take an explicit element
     count; a bare size suffix such as ".b" also has no count.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* ".q" is only valid on SVE registers or as the 1Q arrangement.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A non-zero count must describe a full 64-bit or 128-bit vector, or
     one of the special short arrangements 2H and 4B.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
974
975 /* *STR contains an SVE zero/merge predication suffix. Parse it into
976 *PARSED_TYPE and point *STR at the end of the suffix. */
977
978 static bool
979 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
980 {
981 char *ptr = *str;
982
983 /* Skip '/'. */
984 gas_assert (*ptr == '/');
985 ptr++;
986 switch (TOLOWER (*ptr))
987 {
988 case 'z':
989 parsed_type->type = NT_zero;
990 break;
991 case 'm':
992 parsed_type->type = NT_merge;
993 break;
994 default:
995 if (*ptr != '\0' && *ptr != ',')
996 first_error_fmt (_("unexpected character `%c' in predication type"),
997 *ptr);
998 else
999 first_error (_("missing predication type"));
1000 return false;
1001 }
1002 parsed_type->width = 0;
1003 *str = ptr + 1;
1004 return true;
1005 }
1006
1007 /* Parse a register of the type TYPE.
1008
1009 Return null if the string pointed to by *CCP is not a valid register
1010 name or the parsed register is not of TYPE.
1011
1012 Otherwise return the register, and optionally return the register
1013 shape and element index information in *TYPEINFO.
1014
1015 FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list. */
1016
1017 #define PTR_IN_REGLIST (1U << 0)
1018
static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return NULL;
    }
  /* Narrow TYPE from the accepted set to the concrete type parsed.  */
  type = reg->type;

  /* Look for a ".<T>" shape suffix on V/Z/P registers, or a "/z" or
     "/m" predication suffix on P registers.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The element index must be a constant expression.  */
      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return NULL;
	}

      if (! skip_past_char (&str, ']'))
	return NULL;

      atype.index = exp.X_add_number;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1139
1140 /* Parse register.
1141
1142 Return the register on success; return null otherwise.
1143
1144 If this is a NEON vector register with additional type information, fill
1145 in the struct pointed to by VECTYPE (if non-NULL).
1146
1147 This parser does not handle register lists. */
1148
1149 static const reg_entry *
1150 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1151 struct vector_type_el *vectype)
1152 {
1153 return parse_typed_reg (ccp, type, vectype, 0);
1154 }
1155
1156 static inline bool
1157 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1158 {
1159 return
1160 e1.type == e2.type
1161 && e1.defined == e2.defined
1162 && e1.width == e2.width && e1.index == e2.index;
1163 }
1164
1165 /* This function parses a list of vector registers of type TYPE.
1166 On success, it returns the parsed register list information in the
1167 following encoded format:
1168
1169 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1170 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1171
1172 The information of the register shape and/or index is returned in
1173 *VECTYPE.
1174
1175 It returns PARSE_FAIL if the register list is invalid.
1176
1177 The list contains one to four registers.
1178 Each register can be one of:
1179 <Vt>.<T>[<index>]
1180 <Vt>.<T>
1181 All <T> should be identical.
1182 All <index> should be identical.
1183 There are restrictions on <Vt> numbers which are checked later
1184 (by reg_list_valid_p). */
1185
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;		/* current regno and start of a '-' range  */
  int in_range;			/* nonzero while parsing the Vm half of Vn-Vm  */
  int ret_val;			/* encoded result, 5 bits per register  */
  int i;
  bool error = false;
  bool expect_index = false;

  /* A register list must be enclosed in braces.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* One iteration per register (or per half of a Vn-Vm range).  On
     error we keep scanning so that later syntax problems are still
     diagnosed, but the final result is PARSE_FAIL.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* previous register starts the range  */
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      PTR_IN_REGLIST);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* An index on any element means the whole list is indexed; the
	 index itself follows the closing '}' and is parsed below.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Ranges must be ascending.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* Every register must have the same shape as the first.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Record every register in [val_range, val], 5 bits each, in
	 the order encountered (see the encoding in the function
	 comment).  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a ',' separator, or after '-' (which marks a
     range and sets in_range for the next iteration).  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the shared element index, e.g. the [1] in {v0.s-v3.s}[1].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  /* The architecture allows lists of one to four registers.  */
  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low two bits hold the register count minus one; regnos above.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1326
1327 /* Directives: register aliases. */
1328
1329 static reg_entry *
1330 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1331 {
1332 reg_entry *new;
1333 const char *name;
1334
1335 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1336 {
1337 if (new->builtin)
1338 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1339 str);
1340
1341 /* Only warn about a redefinition if it's not defined as the
1342 same register. */
1343 else if (new->number != number || new->type != type)
1344 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1345
1346 return NULL;
1347 }
1348
1349 name = xstrdup (str);
1350 new = XNEW (reg_entry);
1351
1352 new->name = name;
1353 new->number = number;
1354 new->type = type;
1355 new->builtin = false;
1356
1357 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1358
1359 return new;
1360 }
1361
1362 /* Look for the .req directive. This is of the form:
1363
1364 new_register_name .req existing_register_name
1365
1366 If we find one, or if it looks sufficiently like one that we want to
1367 handle any error here, return TRUE. Otherwise return FALSE. */
1368
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  /* The right-hand side must name an existing register (or alias).  */
  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Upper-case nbuf in place for the second alias.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the variant if it differs from the name as given.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      /* Lower-case nbuf in place for the third alias.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1441
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  Reaching this
   handler therefore means the directive was used with the wrong
   syntax, so diagnose that.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1449
1450 /* The .unreq directive deletes an alias which was previously defined
1451 by .req. For example:
1452
1453 my_alias .req r11
1454 .unreq my_alias */
1455
1456 static void
1457 s_unreq (int a ATTRIBUTE_UNUSED)
1458 {
1459 char *name;
1460 char saved_char;
1461
1462 name = input_line_pointer;
1463 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1464 saved_char = *input_line_pointer;
1465 *input_line_pointer = 0;
1466
1467 if (!*name)
1468 as_bad (_("invalid syntax for .unreq directive"));
1469 else
1470 {
1471 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1472
1473 if (!reg)
1474 as_bad (_("unknown register alias '%s'"), name);
1475 else if (reg->builtin)
1476 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1477 name);
1478 else
1479 {
1480 char *p;
1481 char *nbuf;
1482
1483 str_hash_delete (aarch64_reg_hsh, name);
1484 free ((char *) reg->name);
1485 free (reg);
1486
1487 /* Also locate the all upper case and all lower case versions.
1488 Do not complain if we cannot find one or the other as it
1489 was probably deleted above. */
1490
1491 nbuf = strdup (name);
1492 for (p = nbuf; *p; p++)
1493 *p = TOUPPER (*p);
1494 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1495 if (reg)
1496 {
1497 str_hash_delete (aarch64_reg_hsh, nbuf);
1498 free ((char *) reg->name);
1499 free (reg);
1500 }
1501
1502 for (p = nbuf; *p; p++)
1503 *p = TOLOWER (*p);
1504 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1505 if (reg)
1506 {
1507 str_hash_delete (aarch64_reg_hsh, nbuf);
1508 free ((char *) reg->name);
1509 free (reg);
1510 }
1511
1512 free (nbuf);
1513 }
1514 }
1515
1516 *input_line_pointer = saved_char;
1517 demand_empty_rest_of_line ();
1518 }
1519
1520 /* Directives: Instruction set selection. */
1521
1522 #if defined OBJ_ELF || defined OBJ_COFF
1523 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1524 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1525 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1526 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1527
/* Create a new mapping symbol for the transition to STATE at offset
   VALUE within FRAG.  Mapping symbols ($x for code, $d for data) tell
   consumers of the object file how to interpret the section contents;
   see the AAELF64 "Mapping symbols" section referenced above.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  /* Mapping symbols are local and untyped.  */
  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: keep only the new one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1583
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a $d symbol at offset VALUE in FRAG, covering BYTES bytes of
   padding, followed by a STATE symbol at the padded (aligned)
   address.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If it was also the first symbol in the frag, clear that
	 record too so the bookkeeping stays consistent.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d for the padding bytes, then the requested state after them.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1611
1612 static void mapping_state_2 (enum mstate state, int max_chars);
1613
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* Handle the special first-transition cases before emitting the
     symbol for STATE itself.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
			     || (frag_now_fix () > 0);

      if (add_symbol)
	/* Mark everything before this point as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1655
1656 /* Same as mapping_state, but MAX_CHARS bytes have already been
1657 allocated. Put the mapping symbol that far back. */
1658
1659 static void
1660 mapping_state_2 (enum mstate state, int max_chars)
1661 {
1662 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1663
1664 if (!SEG_NORMAL (now_seg))
1665 return;
1666
1667 if (mapstate == state)
1668 /* The mapping symbol has already been emitted.
1669 There is nothing else to do. */
1670 return;
1671
1672 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1673 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1674 }
1675 #else
1676 #define mapping_state(x) /* nothing */
1677 #define mapping_state_2(x, y) /* nothing */
1678 #endif
1679
1680 /* Directives: sectioning and alignment. */
1681
/* Implement the .bss directive: switch to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1691
/* Implement the .even directive: align the output to a 2-byte
   boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record the alignment so the section itself is padded correctly.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1703
1704 /* Directives: Literal pools. */
1705
1706 static literal_pool *
1707 find_literal_pool (int size)
1708 {
1709 literal_pool *pool;
1710
1711 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1712 {
1713 if (pool->section == now_seg
1714 && pool->sub_section == now_subseg && pool->size == size)
1715 break;
1716 }
1717
1718 return pool;
1719 }
1720
1721 static literal_pool *
1722 find_or_make_literal_pool (int size)
1723 {
1724 /* Next literal pool ID number. */
1725 static unsigned int latest_pool_num = 1;
1726 literal_pool *pool;
1727
1728 pool = find_literal_pool (size);
1729
1730 if (pool == NULL)
1731 {
1732 /* Create a new pool. */
1733 pool = XNEW (literal_pool);
1734 if (!pool)
1735 return NULL;
1736
1737 /* Currently we always put the literal pool in the current text
1738 section. If we were generating "small" model code where we
1739 knew that all code and initialised data was within 1MB then
1740 we could output literals to mergeable, read-only data
1741 sections. */
1742
1743 pool->next_free_entry = 0;
1744 pool->section = now_seg;
1745 pool->sub_section = now_subseg;
1746 pool->size = size;
1747 pool->next = list_of_pools;
1748 pool->symbol = NULL;
1749
1750 /* Add it to the list. */
1751 list_of_pools = pool;
1752 }
1753
1754 /* New pools, and emptied pools, will have a NULL symbol. */
1755 if (pool->symbol == NULL)
1756 {
1757 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1758 &zero_address_frag, 0);
1759 pool->id = latest_pool_num++;
1760 }
1761
1762 /* Done. */
1763 return pool;
1764 }
1765
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.

   On success *EXP is rewritten to be a symbol reference into the pool
   (pool symbol plus byte offset), ready for use as an operand.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Identical constants can share one pool slot.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Likewise for identical symbolic expressions.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to reference the pool entry: pool symbol + offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1825
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously created SYMBOLP its name, segment, value and
   frag, and append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Give the object format and target a chance to react.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1876
1877
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool (one per entry size) at the current position, then mark each
   dumped pool as empty so it can be reused.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* One pool per entry size: 4-byte (.word), 8-byte (.xword) and
     16-byte literals.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 makes the name impossible to write in assembly source.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Resolve the pool symbol to the current position; operands
	 that referenced the pool now point here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The saved bignum copy is no longer needed.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1936
1937 #if defined(OBJ_ELF) || defined(OBJ_COFF)
1938 /* Forward declarations for functions below, in the MD interface
1939 section. */
1940 static struct reloc_table_entry * find_reloc_table_entry (char **);
1941
1942 /* Directives: Data. */
1943 /* N.B. the support for relocation suffix in this directive needs to be
1944 implemented properly. */
1945
/* Implement the .word/.long/.xword/.dword directives: emit a
   comma-separated list of NBYTES-sized data values.  A relocation
   suffix of the form :name: is recognized but currently rejected
   as unimplemented.  */
static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional #:suffix: relocation specifier.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1997 #endif
1998
1999 #ifdef OBJ_ELF
2000 /* Forward declarations for functions below, in the MD interface
2001 section. */
2002 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2003
/* Mark symbol that it follows a variant PCS convention.

   Implements the .variant_pcs directive: set STO_AARCH64_VARIANT_PCS
   in the ELF st_other field of the named symbol, creating the symbol
   if necessary.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  /* NOTE(review): parsing deliberately continues after the error
     above, creating a symbol from the (possibly empty) name; the
     as_bad already guarantees assembly fails overall.  */
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
2026 #endif /* OBJ_ELF */
2027
/* Output a 32-bit word, but mark as an instruction.

   Implements the .inst directive: emit each comma-separated constant
   as a 4-byte instruction word, byte-swapped on big-endian targets
   (AArch64 instructions are always little-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;		/* number of instruction words emitted  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are stored little-endian regardless of data
	 endianness, so undo the generic big-endian emission.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted instructions for debug line info.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2084
/* Implement the .cfi_b_key_frame directive: mark the current CFI FDE
   as signing return addresses with the B pointer-authentication key
   (the default is the A key).  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2092
2093 #ifdef OBJ_ELF
2094 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2095
2096 static void
2097 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2098 {
2099 expressionS exp;
2100
2101 expression (&exp);
2102 frag_grow (4);
2103 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2104 BFD_RELOC_AARCH64_TLSDESC_ADD);
2105
2106 demand_empty_rest_of_line ();
2107 }
2108
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.
   This relocation lets the linker relax a TLS descriptor call
   sequence.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2128
2129 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2130
2131 static void
2132 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2133 {
2134 expressionS exp;
2135
2136 expression (&exp);
2137 frag_grow (4);
2138 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2139 BFD_RELOC_AARCH64_TLSDESC_LDR);
2140
2141 demand_empty_rest_of_line ();
2142 }
2143 #endif /* OBJ_ELF */
2144
2145 #ifdef TE_PE
/* Implement the .secrel32 directive (PE targets): emit each
   comma-separated symbolic expression as a 32-bit section-relative
   offset.  */
static void
s_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Rewrite symbol references as section-relative.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2164
2165 void
2166 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2167 {
2168 expressionS exp;
2169
2170 exp.X_op = O_secrel;
2171 exp.X_add_symbol = symbol;
2172 exp.X_add_number = 0;
2173 emit_expr (&exp, size);
2174 }
2175
2176 static void
2177 s_secidx (int dummy ATTRIBUTE_UNUSED)
2178 {
2179 expressionS exp;
2180
2181 do
2182 {
2183 expression (&exp);
2184 if (exp.X_op == O_symbol)
2185 exp.X_op = O_secidx;
2186
2187 emit_expr (&exp, 2);
2188 }
2189 while (*input_line_pointer++ == ',');
2190
2191 input_line_pointer--;
2192 demand_empty_rest_of_line ();
2193 }
2194 #endif /* TE_PE */
2195
2196 static void s_aarch64_arch (int);
2197 static void s_aarch64_cpu (int);
2198 static void s_aarch64_arch_extension (int);
2199
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  /* Sectioning and alignment.  */
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Architecture selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  /* Raw instruction words and CFI.  */
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relaxation markers and variant PCS marking.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives with relocation-suffix awareness.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  /* PE section-relative data.  */
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision float flavours.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2239 \f
2240
2241 /* Check whether STR points to a register name followed by a comma or the
2242 end of line; REG_TYPE indicates which register types are checked
2243 against. Return TRUE if STR is such a register name; otherwise return
2244 FALSE. The function does not intend to produce any diagnostics, but since
2245 the register parser aarch64_reg_parse, which is called by this function,
2246 does produce diagnostics, we call clear_error to clear any diagnostics
2247 that may be generated by aarch64_reg_parse.
2248 Also, the function returns FALSE directly if there is any user error
2249 present at the function entry. This prevents the existing diagnostics
2250 state from being spoiled.
2251 The function currently serves parse_constant_immediate and
2252 parse_big_immediate only. */
2253 static bool
2254 reg_name_p (char *str, aarch64_reg_type reg_type)
2255 {
2256 const reg_entry *reg;
2257
2258 /* Prevent the diagnostics state from being spoiled. */
2259 if (error_p ())
2260 return false;
2261
2262 reg = aarch64_reg_parse (&str, reg_type, NULL);
2263
2264 /* Clear the parsing error that may be set by the reg parser. */
2265 clear_error ();
2266
2267 if (!reg)
2268 return false;
2269
2270 skip_whitespace (str);
2271 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2272 return true;
2273
2274 return false;
2275 }
2276
2277 /* Parser functions used exclusively in instruction operands. */
2278
2279 /* Parse an immediate expression which may not be constant.
2280
2281 To prevent the expression parser from pushing a register name
2282 into the symbol table as an undefined symbol, firstly a check is
2283 done to find out whether STR is a register of type REG_TYPE followed
2284 by a comma or the end of line. Return FALSE if STR is such a string. */
2285
2286 static bool
2287 parse_immediate_expression (char **str, expressionS *exp,
2288 aarch64_reg_type reg_type)
2289 {
2290 if (reg_name_p (*str, reg_type))
2291 {
2292 set_recoverable_error (_("immediate operand required"));
2293 return false;
2294 }
2295
2296 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2297
2298 if (exp->X_op == O_absent)
2299 {
2300 set_fatal_syntax_error (_("missing immediate expression"));
2301 return false;
2302 }
2303
2304 return true;
2305 }
2306
2307 /* Constant immediate-value read function for use in insn parsing.
2308 STR points to the beginning of the immediate (with the optional
2309 leading #); *VAL receives the value. REG_TYPE says which register
2310 names should be treated as registers rather than as symbolic immediates.
2311
2312 Return TRUE on success; otherwise return FALSE. */
2313
2314 static bool
2315 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2316 {
2317 expressionS exp;
2318
2319 if (! parse_immediate_expression (str, &exp, reg_type))
2320 return false;
2321
2322 if (exp.X_op != O_constant)
2323 {
2324 set_syntax_error (_("constant expression required"));
2325 return false;
2326 }
2327
2328 *val = exp.X_add_number;
2329 return true;
2330 }
2331
/* Compress the IEEE754 single-precision encoding IMM into the 8-bit
   AArch64 floating-point immediate format: the sign bit and the top
   seven bits below the exponent's leading bits.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t mantissa_exp = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31] -> b[7].  */
  return sign | mantissa_exp;
}
2338
/* Return TRUE if the single-precision floating-point value encoded in
   IMM can be expressed in the AArch64 8-bit signed floating-point
   format with 3-bit exponent and normalized 4 bits of precision; in
   other words, the floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Such a value has the following single-precision bit pattern:

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  /* Bits 25-30 must be 'Eeeeee' with E the inverse of bit 30.  */
  uint32_t expected = (imm & (1u << 30)) ? 0x40000000 : 0x3e000000;

  /* The 19 least significant bits must all be zero, and bits 25-30
     must match the expected 'Eeeeee' pattern.  */
  return (imm & 0x7ffff) == 0 && (imm & 0x7e000000) == expected;
}
2371
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the resulting
   single-precision encoding in *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A losslessly convertible double has this bit pattern:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is
     the inverse of E.  */

  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* The 29 least significant mantissa bits must all be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 59-62 must be 'E~~~' with ~ the inverse of E (bit 62).  */
  uint32_t expected = (high32 & 0x40000000) ? 0x40000000 : 0x38000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Reject exponents that do not fit in single precision,
     i.e. Eeee_eeee == 1111_1111.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (high32 & 0xc0000000)	  /* 1 n bit and 1 E bit.  */
	    | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits.  */
	    | (low32 >> 29);		  /* 3 S bits.  */
  return true;
}
2419
2420 /* Return true if we should treat OPERAND as a double-precision
2421 floating-point operand rather than a single-precision one. */
2422 static bool
2423 double_precision_operand_p (const aarch64_opnd_info *operand)
2424 {
2425 /* Check for unsuffixed SVE registers, which are allowed
2426 for LDR and STR but not in instructions that require an
2427 immediate. We get better error messages if we arbitrarily
2428 pick one size, parse the immediate normally, and then
2429 report the match failure in the normal way. */
2430 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2431 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2432 }
2433
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  /* Raw IEEE754 bits when the hexadecimal form is used.  */
  int64_t val = 0;
  /* The single-precision encoding being accumulated.  */
  unsigned fpword = 0;
  bool hex_p = false;

  /* Step over the optional immediate prefix.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision encoding must convert losslessly.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide for a single-precision encoding.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let the generic IEEE reader do the conversion.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2509
2510 /* Less-generic immediate-value read function with the possibility of loading
2511 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2512 instructions.
2513
2514 To prevent the expression parser from pushing a register name into the
2515 symbol table as an undefined symbol, a check is firstly done to find
2516 out whether STR is a register of type REG_TYPE followed by a comma or
2517 the end of line. Return FALSE if STR is such a register. */
2518
2519 static bool
2520 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2521 {
2522 char *ptr = *str;
2523
2524 if (reg_name_p (ptr, reg_type))
2525 {
2526 set_syntax_error (_("immediate operand required"));
2527 return false;
2528 }
2529
2530 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2531
2532 if (inst.reloc.exp.X_op == O_constant)
2533 *imm = inst.reloc.exp.X_add_number;
2534
2535 *str = ptr;
2536
2537 return true;
2538 }
2539
2540 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2541 if NEED_LIBOPCODES is non-zero, the fixup will need
2542 assistance from the libopcodes. */
2543
2544 static inline void
2545 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2546 const aarch64_opnd_info *operand,
2547 int need_libopcodes_p)
2548 {
2549 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2550 reloc->opnd = operand->type;
2551 if (need_libopcodes_p)
2552 reloc->need_libopcodes_p = 1;
2553 };
2554
2555 /* Return TRUE if the instruction needs to be fixed up later internally by
2556 the GAS; otherwise return FALSE. */
2557
2558 static inline bool
2559 aarch64_gas_internal_fixup_p (void)
2560 {
2561 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2562 }
2563
2564 /* Assign the immediate value to the relevant field in *OPERAND if
2565 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2566 needs an internal fixup in a later stage.
2567 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2568 IMM.VALUE that may get assigned with the constant. */
2569 static inline void
2570 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2571 aarch64_opnd_info *operand,
2572 int addr_off_p,
2573 int need_libopcodes_p,
2574 int skip_p)
2575 {
2576 if (reloc->exp.X_op == O_constant)
2577 {
2578 if (addr_off_p)
2579 operand->addr.offset.imm = reloc->exp.X_add_number;
2580 else
2581 operand->imm.value = reloc->exp.X_add_number;
2582 reloc->type = BFD_RELOC_UNUSED;
2583 }
2584 else
2585 {
2586 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2587 /* Tell libopcodes to ignore this operand or not. This is helpful
2588 when one of the operands needs to be fixed up later but we need
2589 libopcodes to check the other operands. */
2590 operand->skip = skip_p;
2591 }
2592 }
2593
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate. It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;	/* Modifier name, without the colons.  */
  int pc_rel;		/* Non-zero if the relocation is PC-relative.  */
  /* Relocation to use for each instruction context; a zero entry
     appears to mean the modifier is not accepted in that context
     (NOTE(review): confirm against the callers of
     find_reloc_table_entry).  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* LDR (literal).  */
};
2613
2614 static struct reloc_table_entry reloc_table[] =
2615 {
2616 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2617 {"lo12", 0,
2618 0, /* adr_type */
2619 0,
2620 0,
2621 BFD_RELOC_AARCH64_ADD_LO12,
2622 BFD_RELOC_AARCH64_LDST_LO12,
2623 0},
2624
2625 /* Higher 21 bits of pc-relative page offset: ADRP */
2626 {"pg_hi21", 1,
2627 0, /* adr_type */
2628 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2629 0,
2630 0,
2631 0,
2632 0},
2633
2634 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2635 {"pg_hi21_nc", 1,
2636 0, /* adr_type */
2637 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2638 0,
2639 0,
2640 0,
2641 0},
2642
2643 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2644 {"abs_g0", 0,
2645 0, /* adr_type */
2646 0,
2647 BFD_RELOC_AARCH64_MOVW_G0,
2648 0,
2649 0,
2650 0},
2651
2652 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2653 {"abs_g0_s", 0,
2654 0, /* adr_type */
2655 0,
2656 BFD_RELOC_AARCH64_MOVW_G0_S,
2657 0,
2658 0,
2659 0},
2660
2661 /* Less significant bits 0-15 of address/value: MOVK, no check */
2662 {"abs_g0_nc", 0,
2663 0, /* adr_type */
2664 0,
2665 BFD_RELOC_AARCH64_MOVW_G0_NC,
2666 0,
2667 0,
2668 0},
2669
2670 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2671 {"abs_g1", 0,
2672 0, /* adr_type */
2673 0,
2674 BFD_RELOC_AARCH64_MOVW_G1,
2675 0,
2676 0,
2677 0},
2678
2679 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2680 {"abs_g1_s", 0,
2681 0, /* adr_type */
2682 0,
2683 BFD_RELOC_AARCH64_MOVW_G1_S,
2684 0,
2685 0,
2686 0},
2687
2688 /* Less significant bits 16-31 of address/value: MOVK, no check */
2689 {"abs_g1_nc", 0,
2690 0, /* adr_type */
2691 0,
2692 BFD_RELOC_AARCH64_MOVW_G1_NC,
2693 0,
2694 0,
2695 0},
2696
2697 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2698 {"abs_g2", 0,
2699 0, /* adr_type */
2700 0,
2701 BFD_RELOC_AARCH64_MOVW_G2,
2702 0,
2703 0,
2704 0},
2705
2706 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2707 {"abs_g2_s", 0,
2708 0, /* adr_type */
2709 0,
2710 BFD_RELOC_AARCH64_MOVW_G2_S,
2711 0,
2712 0,
2713 0},
2714
2715 /* Less significant bits 32-47 of address/value: MOVK, no check */
2716 {"abs_g2_nc", 0,
2717 0, /* adr_type */
2718 0,
2719 BFD_RELOC_AARCH64_MOVW_G2_NC,
2720 0,
2721 0,
2722 0},
2723
2724 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2725 {"abs_g3", 0,
2726 0, /* adr_type */
2727 0,
2728 BFD_RELOC_AARCH64_MOVW_G3,
2729 0,
2730 0,
2731 0},
2732
2733 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2734 {"prel_g0", 1,
2735 0, /* adr_type */
2736 0,
2737 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2738 0,
2739 0,
2740 0},
2741
2742 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2743 {"prel_g0_nc", 1,
2744 0, /* adr_type */
2745 0,
2746 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2747 0,
2748 0,
2749 0},
2750
2751 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2752 {"prel_g1", 1,
2753 0, /* adr_type */
2754 0,
2755 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2756 0,
2757 0,
2758 0},
2759
2760 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2761 {"prel_g1_nc", 1,
2762 0, /* adr_type */
2763 0,
2764 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2765 0,
2766 0,
2767 0},
2768
2769 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2770 {"prel_g2", 1,
2771 0, /* adr_type */
2772 0,
2773 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2774 0,
2775 0,
2776 0},
2777
2778 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2779 {"prel_g2_nc", 1,
2780 0, /* adr_type */
2781 0,
2782 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2783 0,
2784 0,
2785 0},
2786
2787 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2788 {"prel_g3", 1,
2789 0, /* adr_type */
2790 0,
2791 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2792 0,
2793 0,
2794 0},
2795
2796 /* Get to the page containing GOT entry for a symbol. */
2797 {"got", 1,
2798 0, /* adr_type */
2799 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2800 0,
2801 0,
2802 0,
2803 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2804
2805 /* 12 bit offset into the page containing GOT entry for that symbol. */
2806 {"got_lo12", 0,
2807 0, /* adr_type */
2808 0,
2809 0,
2810 0,
2811 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2812 0},
2813
2814 /* 0-15 bits of address/value: MOVk, no check. */
2815 {"gotoff_g0_nc", 0,
2816 0, /* adr_type */
2817 0,
2818 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2819 0,
2820 0,
2821 0},
2822
2823 /* Most significant bits 16-31 of address/value: MOVZ. */
2824 {"gotoff_g1", 0,
2825 0, /* adr_type */
2826 0,
2827 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2828 0,
2829 0,
2830 0},
2831
2832 /* 15 bit offset into the page containing GOT entry for that symbol. */
2833 {"gotoff_lo15", 0,
2834 0, /* adr_type */
2835 0,
2836 0,
2837 0,
2838 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2839 0},
2840
2841 /* Get to the page containing GOT TLS entry for a symbol */
2842 {"gottprel_g0_nc", 0,
2843 0, /* adr_type */
2844 0,
2845 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2846 0,
2847 0,
2848 0},
2849
2850 /* Get to the page containing GOT TLS entry for a symbol */
2851 {"gottprel_g1", 0,
2852 0, /* adr_type */
2853 0,
2854 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2855 0,
2856 0,
2857 0},
2858
2859 /* Get to the page containing GOT TLS entry for a symbol */
2860 {"tlsgd", 0,
2861 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2862 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2863 0,
2864 0,
2865 0,
2866 0},
2867
2868 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2869 {"tlsgd_lo12", 0,
2870 0, /* adr_type */
2871 0,
2872 0,
2873 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2874 0,
2875 0},
2876
2877 /* Lower 16 bits address/value: MOVk. */
2878 {"tlsgd_g0_nc", 0,
2879 0, /* adr_type */
2880 0,
2881 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2882 0,
2883 0,
2884 0},
2885
2886 /* Most significant bits 16-31 of address/value: MOVZ. */
2887 {"tlsgd_g1", 0,
2888 0, /* adr_type */
2889 0,
2890 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2891 0,
2892 0,
2893 0},
2894
2895 /* Get to the page containing GOT TLS entry for a symbol */
2896 {"tlsdesc", 0,
2897 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2898 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2899 0,
2900 0,
2901 0,
2902 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2903
2904 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2905 {"tlsdesc_lo12", 0,
2906 0, /* adr_type */
2907 0,
2908 0,
2909 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2910 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2911 0},
2912
2913 /* Get to the page containing GOT TLS entry for a symbol.
2914 The same as GD, we allocate two consecutive GOT slots
2915 for module index and module offset, the only difference
2916 with GD is the module offset should be initialized to
2917 zero without any outstanding runtime relocation. */
2918 {"tlsldm", 0,
2919 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2920 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2921 0,
2922 0,
2923 0,
2924 0},
2925
2926 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2927 {"tlsldm_lo12_nc", 0,
2928 0, /* adr_type */
2929 0,
2930 0,
2931 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2932 0,
2933 0},
2934
2935 /* 12 bit offset into the module TLS base address. */
2936 {"dtprel_lo12", 0,
2937 0, /* adr_type */
2938 0,
2939 0,
2940 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2941 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2942 0},
2943
2944 /* Same as dtprel_lo12, no overflow check. */
2945 {"dtprel_lo12_nc", 0,
2946 0, /* adr_type */
2947 0,
2948 0,
2949 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2950 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2951 0},
2952
2953 /* bits[23:12] of offset to the module TLS base address. */
2954 {"dtprel_hi12", 0,
2955 0, /* adr_type */
2956 0,
2957 0,
2958 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2959 0,
2960 0},
2961
2962 /* bits[15:0] of offset to the module TLS base address. */
2963 {"dtprel_g0", 0,
2964 0, /* adr_type */
2965 0,
2966 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2967 0,
2968 0,
2969 0},
2970
2971 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2972 {"dtprel_g0_nc", 0,
2973 0, /* adr_type */
2974 0,
2975 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2976 0,
2977 0,
2978 0},
2979
2980 /* bits[31:16] of offset to the module TLS base address. */
2981 {"dtprel_g1", 0,
2982 0, /* adr_type */
2983 0,
2984 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2985 0,
2986 0,
2987 0},
2988
2989 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2990 {"dtprel_g1_nc", 0,
2991 0, /* adr_type */
2992 0,
2993 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2994 0,
2995 0,
2996 0},
2997
2998 /* bits[47:32] of offset to the module TLS base address. */
2999 {"dtprel_g2", 0,
3000 0, /* adr_type */
3001 0,
3002 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3003 0,
3004 0,
3005 0},
3006
3007 /* Lower 16 bit offset into GOT entry for a symbol */
3008 {"tlsdesc_off_g0_nc", 0,
3009 0, /* adr_type */
3010 0,
3011 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3012 0,
3013 0,
3014 0},
3015
3016 /* Higher 16 bit offset into GOT entry for a symbol */
3017 {"tlsdesc_off_g1", 0,
3018 0, /* adr_type */
3019 0,
3020 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3021 0,
3022 0,
3023 0},
3024
3025 /* Get to the page containing GOT TLS entry for a symbol */
3026 {"gottprel", 0,
3027 0, /* adr_type */
3028 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3029 0,
3030 0,
3031 0,
3032 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3033
3034 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3035 {"gottprel_lo12", 0,
3036 0, /* adr_type */
3037 0,
3038 0,
3039 0,
3040 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3041 0},
3042
3043 /* Get tp offset for a symbol. */
3044 {"tprel", 0,
3045 0, /* adr_type */
3046 0,
3047 0,
3048 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3049 0,
3050 0},
3051
3052 /* Get tp offset for a symbol. */
3053 {"tprel_lo12", 0,
3054 0, /* adr_type */
3055 0,
3056 0,
3057 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3058 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3059 0},
3060
3061 /* Get tp offset for a symbol. */
3062 {"tprel_hi12", 0,
3063 0, /* adr_type */
3064 0,
3065 0,
3066 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3067 0,
3068 0},
3069
3070 /* Get tp offset for a symbol. */
3071 {"tprel_lo12_nc", 0,
3072 0, /* adr_type */
3073 0,
3074 0,
3075 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3076 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3077 0},
3078
3079 /* Most significant bits 32-47 of address/value: MOVZ. */
3080 {"tprel_g2", 0,
3081 0, /* adr_type */
3082 0,
3083 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3084 0,
3085 0,
3086 0},
3087
3088 /* Most significant bits 16-31 of address/value: MOVZ. */
3089 {"tprel_g1", 0,
3090 0, /* adr_type */
3091 0,
3092 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3093 0,
3094 0,
3095 0},
3096
3097 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3098 {"tprel_g1_nc", 0,
3099 0, /* adr_type */
3100 0,
3101 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3102 0,
3103 0,
3104 0},
3105
3106 /* Most significant bits 0-15 of address/value: MOVZ. */
3107 {"tprel_g0", 0,
3108 0, /* adr_type */
3109 0,
3110 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3111 0,
3112 0,
3113 0},
3114
3115 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3116 {"tprel_g0_nc", 0,
3117 0, /* adr_type */
3118 0,
3119 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3120 0,
3121 0,
3122 0},
3123
3124 /* 15bit offset from got entry to base address of GOT table. */
3125 {"gotpage_lo15", 0,
3126 0,
3127 0,
3128 0,
3129 0,
3130 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3131 0},
3132
3133 /* 14bit offset from got entry to base address of GOT table. */
3134 {"gotpage_lo14", 0,
3135 0,
3136 0,
3137 0,
3138 0,
3139 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3140 0},
3141 };
3142
3143 /* Given the address of a pointer pointing to the textual name of a
3144 relocation as may appear in assembler source, attempt to find its
3145 details in reloc_table. The pointer will be updated to the character
3146 after the trailing colon. On failure, NULL will be returned;
3147 otherwise return the reloc_table_entry. */
3148
3149 static struct reloc_table_entry *
3150 find_reloc_table_entry (char **str)
3151 {
3152 unsigned int i;
3153 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3154 {
3155 int length = strlen (reloc_table[i].name);
3156
3157 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3158 && (*str)[length] == ':')
3159 {
3160 *str += (length + 1);
3161 return &reloc_table[i];
3162 }
3163 }
3164
3165 return NULL;
3166 }
3167
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  TYPE is the BFD reloc code being considered.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT, TLS and other linker-resolved relocations follow.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3270
3271 int
3272 aarch64_force_relocation (struct fix *fixp)
3273 {
3274 int res = aarch64_force_reloc (fixp->fx_r_type);
3275
3276 if (res == -1)
3277 return generic_force_reloc (fixp);
3278 return res;
3279 }
3280
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators and shift-amount forms are acceptable
   in the current parsing context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3295
3296 /* Parse a <shift> operator on an AArch64 data processing instruction.
3297 Return TRUE on success; otherwise return FALSE. */
3298 static bool
3299 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3300 {
3301 const struct aarch64_name_value_pair *shift_op;
3302 enum aarch64_modifier_kind kind;
3303 expressionS exp;
3304 int exp_has_prefix;
3305 char *s = *str;
3306 char *p = s;
3307
3308 for (p = *str; ISALPHA (*p); p++)
3309 ;
3310
3311 if (p == *str)
3312 {
3313 set_syntax_error (_("shift expression expected"));
3314 return false;
3315 }
3316
3317 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3318
3319 if (shift_op == NULL)
3320 {
3321 set_syntax_error (_("shift operator expected"));
3322 return false;
3323 }
3324
3325 kind = aarch64_get_operand_modifier (shift_op);
3326
3327 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3328 {
3329 set_syntax_error (_("invalid use of 'MSL'"));
3330 return false;
3331 }
3332
3333 if (kind == AARCH64_MOD_MUL
3334 && mode != SHIFTED_MUL
3335 && mode != SHIFTED_MUL_VL)
3336 {
3337 set_syntax_error (_("invalid use of 'MUL'"));
3338 return false;
3339 }
3340
3341 switch (mode)
3342 {
3343 case SHIFTED_LOGIC_IMM:
3344 if (aarch64_extend_operator_p (kind))
3345 {
3346 set_syntax_error (_("extending shift is not permitted"));
3347 return false;
3348 }
3349 break;
3350
3351 case SHIFTED_ARITH_IMM:
3352 if (kind == AARCH64_MOD_ROR)
3353 {
3354 set_syntax_error (_("'ROR' shift is not permitted"));
3355 return false;
3356 }
3357 break;
3358
3359 case SHIFTED_LSL:
3360 if (kind != AARCH64_MOD_LSL)
3361 {
3362 set_syntax_error (_("only 'LSL' shift is permitted"));
3363 return false;
3364 }
3365 break;
3366
3367 case SHIFTED_MUL:
3368 if (kind != AARCH64_MOD_MUL)
3369 {
3370 set_syntax_error (_("only 'MUL' is permitted"));
3371 return false;
3372 }
3373 break;
3374
3375 case SHIFTED_MUL_VL:
3376 /* "MUL VL" consists of two separate tokens. Require the first
3377 token to be "MUL" and look for a following "VL". */
3378 if (kind == AARCH64_MOD_MUL)
3379 {
3380 skip_whitespace (p);
3381 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3382 {
3383 p += 2;
3384 kind = AARCH64_MOD_MUL_VL;
3385 break;
3386 }
3387 }
3388 set_syntax_error (_("only 'MUL VL' is permitted"));
3389 return false;
3390
3391 case SHIFTED_REG_OFFSET:
3392 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3393 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3394 {
3395 set_fatal_syntax_error
3396 (_("invalid shift for the register offset addressing mode"));
3397 return false;
3398 }
3399 break;
3400
3401 case SHIFTED_LSL_MSL:
3402 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3403 {
3404 set_syntax_error (_("invalid shift operator"));
3405 return false;
3406 }
3407 break;
3408
3409 default:
3410 abort ();
3411 }
3412
3413 /* Whitespace can appear here if the next thing is a bare digit. */
3414 skip_whitespace (p);
3415
3416 /* Parse shift amount. */
3417 exp_has_prefix = 0;
3418 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3419 exp.X_op = O_absent;
3420 else
3421 {
3422 if (is_immediate_prefix (*p))
3423 {
3424 p++;
3425 exp_has_prefix = 1;
3426 }
3427 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3428 }
3429 if (kind == AARCH64_MOD_MUL_VL)
3430 /* For consistency, give MUL VL the same shift amount as an implicit
3431 MUL #1. */
3432 operand->shifter.amount = 1;
3433 else if (exp.X_op == O_absent)
3434 {
3435 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3436 {
3437 set_syntax_error (_("missing shift amount"));
3438 return false;
3439 }
3440 operand->shifter.amount = 0;
3441 }
3442 else if (exp.X_op != O_constant)
3443 {
3444 set_syntax_error (_("constant shift amount required"));
3445 return false;
3446 }
3447 /* For parsing purposes, MUL #n has no inherent range. The range
3448 depends on the operand and will be checked by operand-specific
3449 routines. */
3450 else if (kind != AARCH64_MOD_MUL
3451 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3452 {
3453 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3454 return false;
3455 }
3456 else
3457 {
3458 operand->shifter.amount = exp.X_add_number;
3459 operand->shifter.amount_present = 1;
3460 }
3461
3462 operand->shifter.operator_present = 1;
3463 operand->shifter.kind = kind;
3464
3465 *str = p;
3466 return true;
3467 }
3468
3469 /* Parse a <shifter_operand> for a data processing instruction:
3470
3471 #<immediate>
3472 #<immediate>, LSL #imm
3473
3474 Validation of immediate operands is deferred to md_apply_fix.
3475
3476 Return TRUE on success; otherwise return FALSE. */
3477
3478 static bool
3479 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3480 enum parse_shift_mode mode)
3481 {
3482 char *p;
3483
3484 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3485 return false;
3486
3487 p = *str;
3488
3489 /* Accept an immediate expression. */
3490 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3491 REJECT_ABSENT))
3492 return false;
3493
3494 /* Accept optional LSL for arithmetic immediate values. */
3495 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3496 if (! parse_shift (&p, operand, SHIFTED_LSL))
3497 return false;
3498
3499 /* Not accept any shifter for logical immediate values. */
3500 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3501 && parse_shift (&p, operand, mode))
3502 {
3503 set_syntax_error (_("unexpected shift operator"));
3504 return false;
3505 }
3506
3507 *str = p;
3508 return true;
3509 }
3510
3511 /* Parse a <shifter_operand> for a data processing instruction:
3512
3513 <Rm>
3514 <Rm>, <shift>
3515 #<immediate>
3516 #<immediate>, LSL #imm
3517
3518 where <shift> is handled by parse_shift above, and the last two
3519 cases are handled by the function above.
3520
3521 Validation of immediate operands is deferred to md_apply_fix.
3522
3523 Return TRUE on success; otherwise return FALSE. */
3524
3525 static bool
3526 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3527 enum parse_shift_mode mode)
3528 {
3529 const reg_entry *reg;
3530 aarch64_opnd_qualifier_t qualifier;
3531 enum aarch64_operand_class opd_class
3532 = aarch64_get_operand_class (operand->type);
3533
3534 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3535 if (reg)
3536 {
3537 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3538 {
3539 set_syntax_error (_("unexpected register in the immediate operand"));
3540 return false;
3541 }
3542
3543 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3544 {
3545 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3546 return false;
3547 }
3548
3549 operand->reg.regno = reg->number;
3550 operand->qualifier = qualifier;
3551
3552 /* Accept optional shift operation on register. */
3553 if (! skip_past_comma (str))
3554 return true;
3555
3556 if (! parse_shift (str, operand, mode))
3557 return false;
3558
3559 return true;
3560 }
3561 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3562 {
3563 set_syntax_error
3564 (_("integer register expected in the extended/shifted operand "
3565 "register"));
3566 return false;
3567 }
3568
3569 /* We have a shifted immediate variable. */
3570 return parse_shifter_operand_imm (str, operand, mode);
3571 }
3572
/* Parse a <shifter_operand> that may start with a relocation modifier,
   written either "#:reloc:expr" or ":reloc:expr".  Anything without
   such a prefix is handed to parse_shifter_operand.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :reloc: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":" so *str points at the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3634
3635 /* Parse all forms of an address expression. Information is written
3636 to *OPERAND and/or inst.reloc.
3637
3638 The A64 instruction set has the following addressing modes:
3639
3640 Offset
3641 [base] // in SIMD ld/st structure
3642 [base{,#0}] // in ld/st exclusive
3643 [base{,#imm}]
3644 [base,Xm{,LSL #imm}]
3645 [base,Xm,SXTX {#imm}]
3646 [base,Wm,(S|U)XTW {#imm}]
3647 Pre-indexed
3648 [base]! // in ldraa/ldrab exclusive
3649 [base,#imm]!
3650 Post-indexed
3651 [base],#imm
3652 [base],Xm // in SIMD ld/st structure
3653 PC-relative (literal)
3654 label
3655 SVE:
3656 [base,#imm,MUL VL]
3657 [base,Zm.D{,LSL #imm}]
3658 [base,Zm.S,(S|U)XTW {#imm}]
3659 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3660 [Zn.S,#imm]
3661 [Zn.D,#imm]
3662 [Zn.S{, Xm}]
3663 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3664 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3665 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3666
3667 (As a convenience, the notation "=immediate" is permitted in conjunction
3668 with the pc-relative literal load instructions to automatically place an
3669 immediate value or symbolic address in a nearby literal pool and generate
3670 a hidden label which references it.)
3671
3672 Upon a successful parsing, the address structure in *OPERAND will be
3673 filled in the following way:
3674
3675 .base_regno = <base>
3676 .offset.is_reg // 1 if the offset is a register
3677 .offset.imm = <imm>
3678 .offset.regno = <Rm>
3679
3680 For different addressing modes defined in the A64 ISA:
3681
3682 Offset
3683 .pcrel=0; .preind=1; .postind=0; .writeback=0
3684 Pre-indexed
3685 .pcrel=0; .preind=1; .postind=0; .writeback=1
3686 Post-indexed
3687 .pcrel=0; .preind=0; .postind=1; .writeback=1
3688 PC-relative (literal)
3689 .pcrel=1; .preind=1; .postind=0; .writeback=0
3690
3691 The shift/extension information, if any, will be stored in .shifter.
3692 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3693 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3694 corresponding register.
3695
3696 BASE_TYPE says which types of base register should be accepted and
3697 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3698 is the type of shifter that is allowed for immediate offsets,
3699 or SHIFTED_NONE if none.
3700
3701 In all other respects, it is the caller's responsibility to check
3702 for addressing modes not supported by the instruction, and to set
3703 inst.reloc.type. */
3704
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Expression and relocation results accumulate in the global inst.reloc.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form, i.e. =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the relocation variant appropriate for the operand.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register; its class is constrained by BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* First try a register offset, constrained by OFFSET_TYPE.  */
      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]   */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit (X) offset register...  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* ...whose element size matches the base, except for the
		 SVE2 vector-plus-scalar form [Zn.S, Xm].  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW demands a 32-bit (W) offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* Not a register: an immediate offset, possibly relocated.  */
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  /* A trailing '!' requests pre-indexed writeback; a trailing comma
     introduces a post-indexed offset.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* Post-index offset: either a 64-bit register or an expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4003
4004 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4005 on success. */
4006 static bool
4007 parse_address (char **str, aarch64_opnd_info *operand)
4008 {
4009 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4010 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4011 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4012 }
4013
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Delegates to parse_address_main with the SVE base/offset register
   classes and the "MUL VL" immediate shifter enabled.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4026
4027 /* Parse a register X0-X30. The register must be 64-bit and register 31
4028 is unallocated. */
4029 static bool
4030 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4031 {
4032 const reg_entry *reg = parse_reg (str);
4033 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4034 {
4035 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
4036 return false;
4037 }
4038 operand->reg.regno = reg->number;
4039 operand->qualifier = AARCH64_OPND_QLF_X;
4040 return true;
4041 }
4042
4043 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4044 Return TRUE on success; otherwise return FALSE. */
4045 static bool
4046 parse_half (char **str, int *internal_fixup_p)
4047 {
4048 char *p = *str;
4049
4050 skip_past_char (&p, '#');
4051
4052 gas_assert (internal_fixup_p);
4053 *internal_fixup_p = 0;
4054
4055 if (*p == ':')
4056 {
4057 struct reloc_table_entry *entry;
4058
4059 /* Try to parse a relocation. Anything else is an error. */
4060 ++p;
4061
4062 if (!(entry = find_reloc_table_entry (&p)))
4063 {
4064 set_syntax_error (_("unknown relocation modifier"));
4065 return false;
4066 }
4067
4068 if (entry->movw_type == 0)
4069 {
4070 set_syntax_error
4071 (_("this relocation modifier is not allowed on this instruction"));
4072 return false;
4073 }
4074
4075 inst.reloc.type = entry->movw_type;
4076 }
4077 else
4078 *internal_fixup_p = 1;
4079
4080 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4081 return false;
4082
4083 *str = p;
4084 return true;
4085 }
4086
4087 /* Parse an operand for an ADRP instruction:
4088 ADRP <Xd>, <label>
4089 Return TRUE on success; otherwise return FALSE. */
4090
4091 static bool
4092 parse_adrp (char **str)
4093 {
4094 char *p;
4095
4096 p = *str;
4097 if (*p == ':')
4098 {
4099 struct reloc_table_entry *entry;
4100
4101 /* Try to parse a relocation. Anything else is an error. */
4102 ++p;
4103 if (!(entry = find_reloc_table_entry (&p)))
4104 {
4105 set_syntax_error (_("unknown relocation modifier"));
4106 return false;
4107 }
4108
4109 if (entry->adrp_type == 0)
4110 {
4111 set_syntax_error
4112 (_("this relocation modifier is not allowed on this instruction"));
4113 return false;
4114 }
4115
4116 inst.reloc.type = entry->adrp_type;
4117 }
4118 else
4119 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4120
4121 inst.reloc.pc_rel = 1;
4122 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4123 return false;
4124 *str = p;
4125 return true;
4126 }
4127
4128 /* Miscellaneous. */
4129
4130 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4131 of SIZE tokens in which index I gives the token for field value I,
4132 or is null if field value I is invalid. REG_TYPE says which register
4133 names should be treated as registers rather than as symbolic immediates.
4134
4135 Return true on success, moving *STR past the operand and storing the
4136 field value in *VAL. */
4137
4138 static int
4139 parse_enum_string (char **str, int64_t *val, const char *const *array,
4140 size_t size, aarch64_reg_type reg_type)
4141 {
4142 expressionS exp;
4143 char *p, *q;
4144 size_t i;
4145
4146 /* Match C-like tokens. */
4147 p = q = *str;
4148 while (ISALNUM (*q))
4149 q++;
4150
4151 for (i = 0; i < size; ++i)
4152 if (array[i]
4153 && strncasecmp (array[i], p, q - p) == 0
4154 && array[i][q - p] == 0)
4155 {
4156 *val = i;
4157 *str = q;
4158 return true;
4159 }
4160
4161 if (!parse_immediate_expression (&p, &exp, reg_type))
4162 return false;
4163
4164 if (exp.X_op == O_constant
4165 && (uint64_t) exp.X_add_number < size)
4166 {
4167 *val = exp.X_add_number;
4168 *str = p;
4169 return true;
4170 }
4171
4172 /* Use the default error for this operand. */
4173 return false;
4174 }
4175
4176 /* Parse an option for a preload instruction. Returns the encoding for the
4177 option, or PARSE_FAIL. */
4178
4179 static int
4180 parse_pldop (char **str)
4181 {
4182 char *p, *q;
4183 const struct aarch64_name_value_pair *o;
4184
4185 p = q = *str;
4186 while (ISALNUM (*q))
4187 q++;
4188
4189 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4190 if (!o)
4191 return PARSE_FAIL;
4192
4193 *str = q;
4194 return o->value;
4195 }
4196
4197 /* Parse an option for a barrier instruction. Returns the encoding for the
4198 option, or PARSE_FAIL. */
4199
4200 static int
4201 parse_barrier (char **str)
4202 {
4203 char *p, *q;
4204 const struct aarch64_name_value_pair *o;
4205
4206 p = q = *str;
4207 while (ISALPHA (*q))
4208 q++;
4209
4210 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4211 if (!o)
4212 return PARSE_FAIL;
4213
4214 *str = q;
4215 return o->value;
4216 }
4217
4218 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4219 return 0 if successful. Otherwise return PARSE_FAIL. */
4220
4221 static int
4222 parse_barrier_psb (char **str,
4223 const struct aarch64_name_value_pair ** hint_opt)
4224 {
4225 char *p, *q;
4226 const struct aarch64_name_value_pair *o;
4227
4228 p = q = *str;
4229 while (ISALPHA (*q))
4230 q++;
4231
4232 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4233 if (!o)
4234 {
4235 set_fatal_syntax_error
4236 ( _("unknown or missing option to PSB/TSB"));
4237 return PARSE_FAIL;
4238 }
4239
4240 if (o->value != 0x11)
4241 {
4242 /* PSB only accepts option name 'CSYNC'. */
4243 set_syntax_error
4244 (_("the specified option is not accepted for PSB/TSB"));
4245 return PARSE_FAIL;
4246 }
4247
4248 *str = q;
4249 *hint_opt = o;
4250 return 0;
4251 }
4252
4253 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4254 return 0 if successful. Otherwise return PARSE_FAIL. */
4255
4256 static int
4257 parse_bti_operand (char **str,
4258 const struct aarch64_name_value_pair ** hint_opt)
4259 {
4260 char *p, *q;
4261 const struct aarch64_name_value_pair *o;
4262
4263 p = q = *str;
4264 while (ISALPHA (*q))
4265 q++;
4266
4267 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4268 if (!o)
4269 {
4270 set_fatal_syntax_error
4271 ( _("unknown option to BTI"));
4272 return PARSE_FAIL;
4273 }
4274
4275 switch (o->value)
4276 {
4277 /* Valid BTI operands. */
4278 case HINT_OPD_C:
4279 case HINT_OPD_J:
4280 case HINT_OPD_JC:
4281 break;
4282
4283 default:
4284 set_syntax_error
4285 (_("unknown option to BTI"));
4286 return PARSE_FAIL;
4287 }
4288
4289 *str = q;
4290 *hint_opt = o;
4291 return 0;
4292 }
4293
4294 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4295 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4296 on failure. Format:
4297
4298 REG_TYPE.QUALIFIER
4299
4300 Side effect: Update STR with current parse position of success.
4301 */
4302
4303 static const reg_entry *
4304 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4305 aarch64_opnd_qualifier_t *qualifier)
4306 {
4307 char *q;
4308
4309 reg_entry *reg = parse_reg (str);
4310 if (reg != NULL && aarch64_check_reg_type (reg, reg_type))
4311 {
4312 if (!skip_past_char (str, '.'))
4313 {
4314 set_syntax_error (_("missing ZA tile element size separator"));
4315 return NULL;
4316 }
4317
4318 q = *str;
4319 switch (TOLOWER (*q))
4320 {
4321 case 'b':
4322 *qualifier = AARCH64_OPND_QLF_S_B;
4323 break;
4324 case 'h':
4325 *qualifier = AARCH64_OPND_QLF_S_H;
4326 break;
4327 case 's':
4328 *qualifier = AARCH64_OPND_QLF_S_S;
4329 break;
4330 case 'd':
4331 *qualifier = AARCH64_OPND_QLF_S_D;
4332 break;
4333 case 'q':
4334 *qualifier = AARCH64_OPND_QLF_S_Q;
4335 break;
4336 default:
4337 return NULL;
4338 }
4339 q++;
4340
4341 *str = q;
4342 return reg;
4343 }
4344
4345 return NULL;
4346 }
4347
4348 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4349 Function return tile QUALIFIER on success.
4350
4351 Tiles are in example format: za[0-9]\.[bhsd]
4352
4353 Function returns <ZAda> register number or PARSE_FAIL.
4354 */
4355 static int
4356 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4357 {
4358 int regno;
4359 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZAT, qualifier);
4360
4361 if (reg == NULL)
4362 return PARSE_FAIL;
4363 regno = reg->number;
4364
4365 switch (*qualifier)
4366 {
4367 case AARCH64_OPND_QLF_S_B:
4368 if (regno != 0x00)
4369 {
4370 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4371 return PARSE_FAIL;
4372 }
4373 break;
4374 case AARCH64_OPND_QLF_S_H:
4375 if (regno > 0x01)
4376 {
4377 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4378 return PARSE_FAIL;
4379 }
4380 break;
4381 case AARCH64_OPND_QLF_S_S:
4382 if (regno > 0x03)
4383 {
4384 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4385 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4386 return PARSE_FAIL;
4387 }
4388 break;
4389 case AARCH64_OPND_QLF_S_D:
4390 if (regno > 0x07)
4391 {
4392 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4393 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4394 return PARSE_FAIL;
4395 }
4396 break;
4397 default:
4398 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4399 return PARSE_FAIL;
4400 }
4401
4402 return regno;
4403 }
4404
4405 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4406
4407 #<imm>
4408 <imm>
4409
4410 Function return TRUE if immediate was found, or FALSE.
4411 */
4412 static bool
4413 parse_sme_immediate (char **str, int64_t *imm)
4414 {
4415 int64_t val;
4416 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4417 return false;
4418
4419 *imm = val;
4420 return true;
4421 }
4422
4423 /* Parse index with vector select register and immediate:
4424
4425 [<Wv>, <imm>]
4426 [<Wv>, #<imm>]
4427 where <Wv> is in W12-W15 range and # is optional for immediate.
4428
4429 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4430 is set to true.
4431
4432 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4433 IMM output.
4434 */
4435 static bool
4436 parse_sme_za_hv_tiles_operand_index (char **str,
4437 int *vector_select_register,
4438 int64_t *imm)
4439 {
4440 const reg_entry *reg;
4441
4442 if (!skip_past_char (str, '['))
4443 {
4444 set_syntax_error (_("expected '['"));
4445 return false;
4446 }
4447
4448 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4449 reg = parse_reg (str);
4450 if (reg == NULL || reg->type != REG_TYPE_R_32
4451 || reg->number < 12 || reg->number > 15)
4452 {
4453 set_syntax_error (_("expected vector select register W12-W15"));
4454 return false;
4455 }
4456 *vector_select_register = reg->number;
4457
4458 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4459 {
4460 set_syntax_error (_("expected ','"));
4461 return false;
4462 }
4463
4464 if (!parse_sme_immediate (str, imm))
4465 {
4466 set_syntax_error (_("index offset immediate expected"));
4467 return false;
4468 }
4469
4470 if (!skip_past_char (str, ']'))
4471 {
4472 set_syntax_error (_("expected ']'"));
4473 return false;
4474 }
4475
4476 return true;
4477 }
4478
/* Parse SME ZA horizontal or vertical vector access to tiles.
   Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
   vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
   contains <Wv> select register and corresponding optional IMMEDIATE.
   In addition QUALIFIER is extracted.

   Field format examples:

   ZA0<HV>.B[<Wv>, #<imm>]
   <ZAn><HV>.H[<Wv>, #<imm>]
   <ZAn><HV>.S[<Wv>, #<imm>]
   <ZAn><HV>.D[<Wv>, #<imm>]
   <ZAn><HV>.Q[<Wv>, #<imm>]

   Function returns <ZAda> register number or PARSE_FAIL.
*/
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  int regno;
  int regno_limit;		/* Highest valid tile number for this size.  */
  int64_t imm_limit;		/* Highest valid slice index for this size.  */
  int64_t imm_value;
  const reg_entry *reg;

  /* Parse the tile name and its element-size qualifier, e.g. "za0h.b".  */
  reg = parse_reg_with_qual (str, REG_TYPE_ZATHV, qualifier);
  if (!reg)
    return PARSE_FAIL;

  /* Horizontal tile names register as REG_TYPE_ZATH; everything else in
     REG_TYPE_ZATHV is a vertical access.  */
  *slice_indicator = (aarch64_check_reg_type (reg, REG_TYPE_ZATH)
		      ? HV_horizontal
		      : HV_vertical);
  regno = reg->number;

  /* Each doubling of the element size doubles the tile count and halves
     the number of slice offsets.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* Parse the "[<Wv>, #<imm>]" suffix.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4568
4569
4570 static int
4571 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4572 enum sme_hv_slice *slice_indicator,
4573 int *vector_select_register,
4574 int *imm,
4575 aarch64_opnd_qualifier_t *qualifier)
4576 {
4577 int regno;
4578
4579 if (!skip_past_char (str, '{'))
4580 {
4581 set_syntax_error (_("expected '{'"));
4582 return PARSE_FAIL;
4583 }
4584
4585 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4586 vector_select_register, imm,
4587 qualifier);
4588
4589 if (regno == PARSE_FAIL)
4590 return PARSE_FAIL;
4591
4592 if (!skip_past_char (str, '}'))
4593 {
4594 set_syntax_error (_("expected '}'"));
4595 return PARSE_FAIL;
4596 }
4597
4598 return regno;
4599 }
4600
/* Parse list of up to eight 64-bit element tile names separated by commas in
   SME's ZERO instruction:

     ZERO { <mask> }

   Function returns <mask>:

     an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
*/
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZAT,
						  &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* A .H tile covers every second .D tile: za0.h is 0x55,
	       za1.h is 0xaa.  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* A .S tile covers every fourth .D tile, e.g. za0.s is 0x11.  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* A .D tile sets exactly one mask bit.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4663
4664 /* Wraps in curly braces <mask> operand ZERO instruction:
4665
4666 ZERO { <mask> }
4667
4668 Function returns value of <mask> bit-field.
4669 */
4670 static int
4671 parse_sme_list_of_64bit_tiles (char **str)
4672 {
4673 int regno;
4674
4675 if (!skip_past_char (str, '{'))
4676 {
4677 set_syntax_error (_("expected '{'"));
4678 return PARSE_FAIL;
4679 }
4680
4681 /* Empty <mask> list is an all-zeros immediate. */
4682 if (!skip_past_char (str, '}'))
4683 {
4684 regno = parse_sme_zero_mask (str);
4685 if (regno == PARSE_FAIL)
4686 return PARSE_FAIL;
4687
4688 if (!skip_past_char (str, '}'))
4689 {
4690 set_syntax_error (_("expected '}'"));
4691 return PARSE_FAIL;
4692 }
4693 }
4694 else
4695 regno = 0x00;
4696
4697 return regno;
4698 }
4699
4700 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4701 Operand format:
4702
4703 ZA[<Wv>, <imm>]
4704 ZA[<Wv>, #<imm>]
4705
4706 Function returns <Wv> or PARSE_FAIL.
4707 */
4708 static int
4709 parse_sme_za_array (char **str, int *imm)
4710 {
4711 char *p, *q;
4712 int regno;
4713 int64_t imm_value;
4714
4715 p = q = *str;
4716 while (ISALPHA (*q))
4717 q++;
4718
4719 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4720 {
4721 set_syntax_error (_("expected ZA array"));
4722 return PARSE_FAIL;
4723 }
4724
4725 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4726 return PARSE_FAIL;
4727
4728 if (imm_value < 0 || imm_value > 15)
4729 {
4730 set_syntax_error (_("offset out of range"));
4731 return PARSE_FAIL;
4732 }
4733
4734 *imm = imm_value;
4735 *str = q;
4736 return regno;
4737 }
4738
4739 /* Parse streaming mode operand for SMSTART and SMSTOP.
4740
4741 {SM | ZA}
4742
4743 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4744 */
4745 static int
4746 parse_sme_sm_za (char **str)
4747 {
4748 char *p, *q;
4749
4750 p = q = *str;
4751 while (ISALPHA (*q))
4752 q++;
4753
4754 if ((q - p != 2)
4755 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4756 {
4757 set_syntax_error (_("expected SM or ZA operand"));
4758 return PARSE_FAIL;
4759 }
4760
4761 *str = q;
4762 return TOLOWER (p[0]);
4763 }
4764
4765 /* Parse the name of the source scalable predicate register, the index base
4766 register W12-W15 and the element index. Function performs element index
4767 limit checks as well as qualifier type checks.
4768
4769 <Pn>.<T>[<Wv>, <imm>]
4770 <Pn>.<T>[<Wv>, #<imm>]
4771
4772 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4773 <imm> to IMM.
4774 Function returns <Pn>, or PARSE_FAIL.
4775 */
4776 static int
4777 parse_sme_pred_reg_with_index(char **str,
4778 int *index_base_reg,
4779 int *imm,
4780 aarch64_opnd_qualifier_t *qualifier)
4781 {
4782 int regno;
4783 int64_t imm_limit;
4784 int64_t imm_value;
4785 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4786
4787 if (reg == NULL)
4788 return PARSE_FAIL;
4789 regno = reg->number;
4790
4791 switch (*qualifier)
4792 {
4793 case AARCH64_OPND_QLF_S_B:
4794 imm_limit = 15;
4795 break;
4796 case AARCH64_OPND_QLF_S_H:
4797 imm_limit = 7;
4798 break;
4799 case AARCH64_OPND_QLF_S_S:
4800 imm_limit = 3;
4801 break;
4802 case AARCH64_OPND_QLF_S_D:
4803 imm_limit = 1;
4804 break;
4805 default:
4806 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4807 return PARSE_FAIL;
4808 }
4809
4810 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4811 return PARSE_FAIL;
4812
4813 if (imm_value < 0 || imm_value > imm_limit)
4814 {
4815 set_syntax_error (_("element index out of range for given variant"));
4816 return PARSE_FAIL;
4817 }
4818
4819 *imm = imm_value;
4820
4821 return regno;
4822 }
4823
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   On success *STR is advanced past the name and, when FLAGS is non-NULL,
   *FLAGS receives the register's flag bits (zero for an
   implementation-defined S<...> name).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, without ever writing
     past the end of the buffer.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Reject field values that do not fit their encoding widths.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the MSR/MRS system-register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name: diagnose uses that the selected processor does not
	 support, but still return the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4897
4898 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4899 for the option, or NULL. */
4900
4901 static const aarch64_sys_ins_reg *
4902 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4903 {
4904 char *p, *q;
4905 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4906 const aarch64_sys_ins_reg *o;
4907
4908 p = buf;
4909 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4910 if (p < buf + (sizeof (buf) - 1))
4911 *p++ = TOLOWER (*q);
4912 *p = '\0';
4913
4914 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4915 valid system register. This is enforced by construction of the hash
4916 table. */
4917 if (p - buf != q - *str)
4918 return NULL;
4919
4920 o = str_hash_find (sys_ins_regs, buf);
4921 if (!o)
4922 return NULL;
4923
4924 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4925 o->name, o->value, o->flags, 0))
4926 as_bad (_("selected processor does not support system register "
4927 "name '%s'"), buf);
4928 if (aarch64_sys_reg_deprecated_p (o->flags))
4929 as_warn (_("system register name '%s' is deprecated and may be "
4930 "removed in a future release"), buf);
4931
4932 *str = q;
4933 return o;
4934 }
4935 \f
/* Helper macros for operand parsing.  Each expands inside a function that
   provides locals named "str", "reg", "qualifier", "val", "info" and
   "imm_reg_type" (as applicable) and a label "failure" to jump to on a
   parse error.  */

/* Consume the single character CHR from the input, or fail.  */
#define po_char_or_fail(chr) do { \
    if (! skip_past_char (&str, chr)) \
      goto failure; \
  } while (0)

/* Parse a register of type REGTYPE into REG, or fail with the default
   error message.  */
#define po_reg_or_fail(regtype) do { \
    reg = aarch64_reg_parse (&str, regtype, NULL); \
    if (!reg) \
      { \
	set_default_error (); \
	goto failure; \
      } \
  } while (0)

/* Parse a 32/64-bit integer register into REG/QUALIFIER, check it against
   REG_TYPE, and store it into the current operand INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do { \
    reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
    if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
      { \
	set_default_error (); \
	goto failure; \
      } \
    info->reg.regno = reg->number; \
    info->qualifier = qualifier; \
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do { \
    if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
      goto failure; \
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do { \
    if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
      goto failure; \
    if (val < min || val > max) \
      { \
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max)); \
	goto failure; \
      } \
  } while (0)

/* Parse one of the strings in ARRAY into VAL (its index), or fail.  */
#define po_enum_or_fail(array) do { \
    if (!parse_enum_string (&str, &val, array, \
			    ARRAY_SIZE (array), imm_reg_type)) \
      goto failure; \
  } while (0)

/* Evaluate EXPR and fail if it yields false.  */
#define po_misc_or_fail(expr) do { \
    if (!expr) \
      goto failure; \
  } while (0)
4987 \f
/* Encode the 12-bit imm field of an add/sub immediate instruction;
   the field starts at bit 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4994
/* Encode the shift amount field of an add/sub immediate instruction;
   the field starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
5001
5002
/* Encode the 21-bit imm field of an ADR instruction: the low two bits go
   to immlo (bits [30:29]) and the remaining 19 bits to immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  return (((imm & 0x3) << 29)			/* [1:0] -> [30:29].  */
	  | (((imm >> 2) & 0x7ffff) << 5));	/* [20:2] -> [23:5].  */
}
5010
5011 /* encode the immediate field of Move wide immediate */
5012 static inline uint32_t
5013 encode_movw_imm (uint32_t imm)
5014 {
5015 return imm << 5;
5016 }
5017
/* Encode the 26-bit offset of an unconditional branch; the offset is kept
   in bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x3ffffff;
}
5024
/* Encode the 19-bit offset of a conditional branch or compare-and-branch;
   the offset occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5031
/* Encode the 19-bit offset of a load-literal instruction; the offset
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5038
/* Encode the 14-bit offset of a test-and-branch instruction; the offset
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
5045
5046 /* Encode the 16-bit imm field of svc/hvc/smc. */
5047 static inline uint32_t
5048 encode_svc_imm (uint32_t imm)
5049 {
5050 return imm << 5;
5051 }
5052
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the op bit
   (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000;
}
5059
/* Reencode a MOVZ/MOVN opcode as MOVZ by setting the opc bit (bit 30).  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}
5065
/* Reencode a MOVZ/MOVN opcode as MOVN by clearing the opc bit (bit 30).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
5071
5072 /* Overall per-instruction processing. */
5073
5074 /* We need to be able to fix up arbitrary expressions in some statements.
5075 This is so that we can handle symbols that are an arbitrary distance from
5076 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5077 which returns part of an address in a form which will be valid for
5078 a data instruction. We do this by pushing the expression into a symbol
5079 in the expr_section, and creating a fix for that. */
5080
5081 static fixS *
5082 fix_new_aarch64 (fragS * frag,
5083 int where,
5084 short int size,
5085 expressionS * exp,
5086 int pc_rel,
5087 int reloc)
5088 {
5089 fixS *new_fix;
5090
5091 switch (exp->X_op)
5092 {
5093 case O_constant:
5094 case O_symbol:
5095 case O_add:
5096 case O_subtract:
5097 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5098 break;
5099
5100 default:
5101 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5102 pc_rel, reloc);
5103 break;
5104 }
5105 return new_fix;
5106 }
5107 \f
/* Diagnostics on operands errors.  */

/* By default, output a verbose error message.
   Disable the verbose error message with -mno-verbose-error.  */
static int verbose_error_p = 1;
5113
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.

   One name per enum aarch64_operand_error_kind value, in enum order, so
   that the table can be indexed directly by an error kind.  The UNTIED
   entries must be present: omitting them would skew every name from
   AARCH64_OPDE_OUT_OF_RANGE onwards and read past the end of the array
   for the last kinds (see the DEBUG_TRACE in add_operand_error_record).  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5131
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on enum aarch64_operand_error_kind being
     declared in increasing order of severity; these asserts document (and,
     in builds with assertions enabled, enforce) that ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5155
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  /* Static buffer: the returned pointer is only valid until the next call,
     and the routine is not reentrant.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 characters and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5184
5185 static void
5186 reset_aarch64_instruction (aarch64_instruction *instruction)
5187 {
5188 memset (instruction, '\0', sizeof (aarch64_instruction));
5189 instruction->reloc.type = BFD_RELOC_UNUSED;
5190 }
5191
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error: the opcode template it was found against,
   the error detail itself, and the link to the next record.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records.  TAIL tracks the last node so the
   whole list can be spliced onto the free list in one step (see
   init_operand_error_report).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled when the report is reinitialized.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5223
5224 /* Initialize the data structure that stores the operand mismatch
5225 information on assembling one line of the assembly code. */
5226 static void
5227 init_operand_error_report (void)
5228 {
5229 if (operand_error_report.head != NULL)
5230 {
5231 gas_assert (operand_error_report.tail != NULL);
5232 operand_error_report.tail->next = free_opnd_error_record_nodes;
5233 free_opnd_error_record_nodes = operand_error_report.head;
5234 operand_error_report.head = NULL;
5235 operand_error_report.tail = NULL;
5236 return;
5237 }
5238 gas_assert (operand_error_report.tail == NULL);
5239 }
5240
5241 /* Return TRUE if some operand error has been recorded during the
5242 parsing of the current assembly line using the opcode *OPCODE;
5243 otherwise return FALSE. */
5244 static inline bool
5245 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5246 {
5247 operand_error_record *record = operand_error_report.head;
5248 return record && record->opcode == opcode;
5249 }
5250
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD starts out aliasing the list head; if a node is allocated or
     recycled below it is redirected to that node instead.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Here RECORD is either the existing head record for OPCODE or the
     freshly inserted one; in both cases its detail gets overwritten.  */
  record->detail = new_record->detail;
}
5302
5303 static inline void
5304 record_operand_error_info (const aarch64_opcode *opcode,
5305 aarch64_operand_error *error_info)
5306 {
5307 operand_error_record record;
5308 record.opcode = opcode;
5309 record.detail = *error_info;
5310 add_operand_error_record (&record);
5311 }
5312
5313 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5314 error message *ERROR, for operand IDX (count from 0). */
5315
5316 static void
5317 record_operand_error (const aarch64_opcode *opcode, int idx,
5318 enum aarch64_operand_error_kind kind,
5319 const char* error)
5320 {
5321 aarch64_operand_error info;
5322 memset(&info, 0, sizeof (info));
5323 info.index = idx;
5324 info.kind = kind;
5325 info.error = error;
5326 info.non_fatal = false;
5327 record_operand_error_info (opcode, &info);
5328 }
5329
5330 static void
5331 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5332 enum aarch64_operand_error_kind kind,
5333 const char* error, const int *extra_data)
5334 {
5335 aarch64_operand_error info;
5336 info.index = idx;
5337 info.kind = kind;
5338 info.error = error;
5339 info.data[0].i = extra_data[0];
5340 info.data[1].i = extra_data[1];
5341 info.data[2].i = extra_data[2];
5342 info.non_fatal = false;
5343 record_operand_error_info (opcode, &info);
5344 }
5345
5346 static void
5347 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5348 const char* error, int lower_bound,
5349 int upper_bound)
5350 {
5351 int data[3] = {lower_bound, upper_bound, 0};
5352 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5353 error, data);
5354 }
5355
5356 /* Remove the operand error record for *OPCODE. */
5357 static void ATTRIBUTE_UNUSED
5358 remove_operand_error_record (const aarch64_opcode *opcode)
5359 {
5360 if (opcode_has_operand_error_p (opcode))
5361 {
5362 operand_error_record* record = operand_error_report.head;
5363 gas_assert (record != NULL && operand_error_report.tail != NULL);
5364 operand_error_report.head = record->next;
5365 record->next = free_opnd_error_record_nodes;
5366 free_opnd_error_record_nodes = record;
5367 if (operand_error_report.head == NULL)
5368 {
5369 gas_assert (operand_error_report.tail == record);
5370 operand_error_report.tail = NULL;
5371 }
5372 }
5373 }
5374
5375 /* Given the instruction in *INSTR, return the index of the best matched
5376 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5377
5378 Return -1 if there is no qualifier sequence; return the first match
5379 if there is multiple matches found. */
5380
5381 static int
5382 find_best_match (const aarch64_inst *instr,
5383 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5384 {
5385 int i, num_opnds, max_num_matched, idx;
5386
5387 num_opnds = aarch64_num_of_operands (instr->opcode);
5388 if (num_opnds == 0)
5389 {
5390 DEBUG_TRACE ("no operand");
5391 return -1;
5392 }
5393
5394 max_num_matched = 0;
5395 idx = 0;
5396
5397 /* For each pattern. */
5398 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5399 {
5400 int j, num_matched;
5401 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5402
5403 /* Most opcodes has much fewer patterns in the list. */
5404 if (empty_qualifier_sequence_p (qualifiers))
5405 {
5406 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5407 break;
5408 }
5409
5410 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5411 if (*qualifiers == instr->operands[j].qualifier)
5412 ++num_matched;
5413
5414 if (num_matched > max_num_matched)
5415 {
5416 max_num_matched = num_matched;
5417 idx = i;
5418 }
5419 }
5420
5421 DEBUG_TRACE ("return with %d", idx);
5422 return idx;
5423 }
5424
5425 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5426 corresponding operands in *INSTR. */
5427
5428 static inline void
5429 assign_qualifier_sequence (aarch64_inst *instr,
5430 const aarch64_opnd_qualifier_t *qualifiers)
5431 {
5432 int i = 0;
5433 int num_opnds = aarch64_num_of_operands (instr->opcode);
5434 gas_assert (num_opnds);
5435 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5436 instr->operands[i].qualifier = *qualifiers;
5437 }
5438
5439 /* Callback used by aarch64_print_operand to apply STYLE to the
5440 disassembler output created from FMT and ARGS. The STYLER object holds
5441 any required state. Must return a pointer to a string (created from FMT
5442 and ARGS) that will continue to be valid until the complete disassembled
5443 instruction has been printed.
5444
5445 We don't currently add any styling to the output of the disassembler as
5446 used within assembler error messages, and so STYLE is ignored here. A
5447 new string is allocated on the obstack help within STYLER and returned
5448 to the caller. */
5449
5450 static const char *aarch64_apply_style
5451 (struct aarch64_styler *styler,
5452 enum disassembler_style style ATTRIBUTE_UNUSED,
5453 const char *fmt, va_list args)
5454 {
5455 int res;
5456 char *ptr;
5457 struct obstack *stack = (struct obstack *) styler->state;
5458 va_list ap;
5459
5460 /* Calculate the required space. */
5461 va_copy (ap, args);
5462 res = vsnprintf (NULL, 0, fmt, ap);
5463 va_end (ap);
5464 gas_assert (res >= 0);
5465
5466 /* Allocate space on the obstack and format the result. */
5467 ptr = (char *) obstack_alloc (stack, res + 1);
5468 res = vsnprintf (ptr, (res + 1), fmt, args);
5469 gas_assert (res >= 0);
5470
5471 return ptr;
5472 }
5473
/* Print operands for the diagnosis purpose.

   BUF must already contain a NUL-terminated prefix (the callers put the
   mnemonic there); the operand strings are strcat'd onto it.  OPCODE is
   the instruction template and OPNDS the array of up to
   AARCH64_MAX_OPND_NUM parsed operands.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled fragments are accumulated on the obstack; see
     aarch64_apply_style.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5525
/* Send to stderr a string as information, prefixed with the current
   file/line position (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list ap;

  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }

  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
5549
5550 /* Output one operand error record. */
5551
5552 static void
5553 output_operand_error_record (const operand_error_record *record, char *str)
5554 {
5555 const aarch64_operand_error *detail = &record->detail;
5556 int idx = detail->index;
5557 const aarch64_opcode *opcode = record->opcode;
5558 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5559 : AARCH64_OPND_NIL);
5560
5561 typedef void (*handler_t)(const char *format, ...);
5562 handler_t handler = detail->non_fatal ? as_warn : as_bad;
5563
5564 switch (detail->kind)
5565 {
5566 case AARCH64_OPDE_NIL:
5567 gas_assert (0);
5568 break;
5569
5570 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
5571 handler (_("this `%s' should have an immediately preceding `%s'"
5572 " -- `%s'"),
5573 detail->data[0].s, detail->data[1].s, str);
5574 break;
5575
5576 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
5577 handler (_("the preceding `%s' should be followed by `%s` rather"
5578 " than `%s` -- `%s'"),
5579 detail->data[1].s, detail->data[0].s, opcode->name, str);
5580 break;
5581
5582 case AARCH64_OPDE_SYNTAX_ERROR:
5583 case AARCH64_OPDE_RECOVERABLE:
5584 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5585 case AARCH64_OPDE_OTHER_ERROR:
5586 /* Use the prepared error message if there is, otherwise use the
5587 operand description string to describe the error. */
5588 if (detail->error != NULL)
5589 {
5590 if (idx < 0)
5591 handler (_("%s -- `%s'"), detail->error, str);
5592 else
5593 handler (_("%s at operand %d -- `%s'"),
5594 detail->error, idx + 1, str);
5595 }
5596 else
5597 {
5598 gas_assert (idx >= 0);
5599 handler (_("operand %d must be %s -- `%s'"), idx + 1,
5600 aarch64_get_operand_desc (opd_code), str);
5601 }
5602 break;
5603
5604 case AARCH64_OPDE_INVALID_VARIANT:
5605 handler (_("operand mismatch -- `%s'"), str);
5606 if (verbose_error_p)
5607 {
5608 /* We will try to correct the erroneous instruction and also provide
5609 more information e.g. all other valid variants.
5610
5611 The string representation of the corrected instruction and other
5612 valid variants are generated by
5613
5614 1) obtaining the intermediate representation of the erroneous
5615 instruction;
5616 2) manipulating the IR, e.g. replacing the operand qualifier;
5617 3) printing out the instruction by calling the printer functions
5618 shared with the disassembler.
5619
5620 The limitation of this method is that the exact input assembly
5621 line cannot be accurately reproduced in some cases, for example an
5622 optional operand present in the actual assembly line will be
5623 omitted in the output; likewise for the optional syntax rules,
5624 e.g. the # before the immediate. Another limitation is that the
5625 assembly symbols and relocation operations in the assembly line
5626 currently cannot be printed out in the error report. Last but not
5627 least, when there is other error(s) co-exist with this error, the
5628 'corrected' instruction may be still incorrect, e.g. given
5629 'ldnp h0,h1,[x0,#6]!'
5630 this diagnosis will provide the version:
5631 'ldnp s0,s1,[x0,#6]!'
5632 which is still not right. */
5633 size_t len = strlen (get_mnemonic_name (str));
5634 int i, qlf_idx;
5635 bool result;
5636 char buf[2048];
5637 aarch64_inst *inst_base = &inst.base;
5638 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5639
5640 /* Init inst. */
5641 reset_aarch64_instruction (&inst);
5642 inst_base->opcode = opcode;
5643
5644 /* Reset the error report so that there is no side effect on the
5645 following operand parsing. */
5646 init_operand_error_report ();
5647
5648 /* Fill inst. */
5649 result = parse_operands (str + len, opcode)
5650 && programmer_friendly_fixup (&inst);
5651 gas_assert (result);
5652 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5653 NULL, NULL, insn_sequence);
5654 gas_assert (!result);
5655
5656 /* Find the most matched qualifier sequence. */
5657 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5658 gas_assert (qlf_idx > -1);
5659
5660 /* Assign the qualifiers. */
5661 assign_qualifier_sequence (inst_base,
5662 opcode->qualifiers_list[qlf_idx]);
5663
5664 /* Print the hint. */
5665 output_info (_(" did you mean this?"));
5666 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5667 print_operands (buf, opcode, inst_base->operands);
5668 output_info (_(" %s"), buf);
5669
5670 /* Print out other variant(s) if there is any. */
5671 if (qlf_idx != 0 ||
5672 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5673 output_info (_(" other valid variant(s):"));
5674
5675 /* For each pattern. */
5676 qualifiers_list = opcode->qualifiers_list;
5677 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5678 {
5679 /* Most opcodes has much fewer patterns in the list.
5680 First NIL qualifier indicates the end in the list. */
5681 if (empty_qualifier_sequence_p (*qualifiers_list))
5682 break;
5683
5684 if (i != qlf_idx)
5685 {
5686 /* Mnemonics name. */
5687 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5688
5689 /* Assign the qualifiers. */
5690 assign_qualifier_sequence (inst_base, *qualifiers_list);
5691
5692 /* Print instruction. */
5693 print_operands (buf, opcode, inst_base->operands);
5694
5695 output_info (_(" %s"), buf);
5696 }
5697 }
5698 }
5699 break;
5700
5701 case AARCH64_OPDE_UNTIED_IMMS:
5702 handler (_("operand %d must have the same immediate value "
5703 "as operand 1 -- `%s'"),
5704 detail->index + 1, str);
5705 break;
5706
5707 case AARCH64_OPDE_UNTIED_OPERAND:
5708 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5709 detail->index + 1, str);
5710 break;
5711
5712 case AARCH64_OPDE_OUT_OF_RANGE:
5713 if (detail->data[0].i != detail->data[1].i)
5714 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5715 detail->error ? detail->error : _("immediate value"),
5716 detail->data[0].i, detail->data[1].i, idx + 1, str);
5717 else
5718 handler (_("%s must be %d at operand %d -- `%s'"),
5719 detail->error ? detail->error : _("immediate value"),
5720 detail->data[0].i, idx + 1, str);
5721 break;
5722
5723 case AARCH64_OPDE_REG_LIST:
5724 if (detail->data[0].i == 1)
5725 handler (_("invalid number of registers in the list; "
5726 "only 1 register is expected at operand %d -- `%s'"),
5727 idx + 1, str);
5728 else
5729 handler (_("invalid number of registers in the list; "
5730 "%d registers are expected at operand %d -- `%s'"),
5731 detail->data[0].i, idx + 1, str);
5732 break;
5733
5734 case AARCH64_OPDE_UNALIGNED:
5735 handler (_("immediate value must be a multiple of "
5736 "%d at operand %d -- `%s'"),
5737 detail->data[0].i, idx + 1, str);
5738 break;
5739
5740 default:
5741 gas_assert (0);
5742 break;
5743 }
5744 }
5745
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  Non-fatal errors are
     ignored in this scan when only non-fatal errors are wanted.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  /* KIND may legitimately stay NIL when filtering for non-fatal errors
     only and none were recorded.  */
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5843 \f
/* Write the 32-bit instruction word INSN to BUF - always little-endian,
   regardless of host byte order.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  int byte;

  /* Emit the least-significant byte first.  */
  for (byte = 0; byte < 4; byte++)
    out[byte] = (insn >> (byte * 8)) & 0xff;
}
5854
/* Read a 32-bit little-endian instruction word from BUF, independent of
   host byte order.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *in = (unsigned char *) buf;
  uint32_t insn = 0;
  int byte;

  /* Fold the bytes in from most-significant down to least.  */
  for (byte = 3; byte >= 0; byte--)
    insn = (insn << 8) | in[byte];

  return insn;
}
5864
5865 static void
5866 output_inst (struct aarch64_inst *new_inst)
5867 {
5868 char *to = NULL;
5869
5870 to = frag_more (INSN_SIZE);
5871
5872 frag_now->tc_frag_data.recorded = 1;
5873
5874 put_aarch64_insn (to, inst.base.value);
5875
5876 if (inst.reloc.type != BFD_RELOC_UNUSED)
5877 {
5878 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5879 INSN_SIZE, &inst.reloc.exp,
5880 inst.reloc.pc_rel,
5881 inst.reloc.type);
5882 DEBUG_TRACE ("Prepared relocation fix up");
5883 /* Don't check the addend value against the instruction size,
5884 that's the job of our code in md_apply_fix(). */
5885 fixp->fx_no_overflow = 1;
5886 if (new_inst != NULL)
5887 fixp->tc_fix_data.inst = new_inst;
5888 if (aarch64_gas_internal_fixup_p ())
5889 {
5890 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5891 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5892 fixp->fx_addnumber = inst.reloc.flags;
5893 }
5894 }
5895
5896 dwarf2_emit_insn (INSN_SIZE);
5897 }
5898
/* Link together opcodes of the same name.  A mnemonic maps to a chain of
   these nodes, one per candidate opcode template.  */

struct templates
{
  /* One candidate opcode for the mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next template sharing the same mnemonic, or NULL.  */
  struct templates *next;
};

typedef struct templates templates;
5908
5909 static templates *
5910 lookup_mnemonic (const char *start, int len)
5911 {
5912 templates *templ = NULL;
5913
5914 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5915 return templ;
5916 }
5917
5918 /* Subroutine of md_assemble, responsible for looking up the primary
5919 opcode from the mnemonic the user wrote. BASE points to the beginning
5920 of the mnemonic, DOT points to the first '.' within the mnemonic
5921 (if any) and END points to the end of the mnemonic. */
5922
5923 static templates *
5924 opcode_lookup (char *base, char *dot, char *end)
5925 {
5926 const aarch64_cond *cond;
5927 char condname[16];
5928 int len;
5929
5930 if (dot == end)
5931 return 0;
5932
5933 inst.cond = COND_ALWAYS;
5934
5935 /* Handle a possible condition. */
5936 if (dot)
5937 {
5938 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5939 if (!cond)
5940 return 0;
5941 inst.cond = cond->value;
5942 len = dot - base;
5943 }
5944 else
5945 len = end - base;
5946
5947 if (inst.cond == COND_ALWAYS)
5948 {
5949 /* Look for unaffixed mnemonic. */
5950 return lookup_mnemonic (base, len);
5951 }
5952 else if (len <= 13)
5953 {
5954 /* append ".c" to mnemonic if conditional */
5955 memcpy (condname, base, len);
5956 memcpy (condname + len, ".c", 2);
5957 base = condname;
5958 len += 2;
5959 return lookup_mnemonic (base, len);
5960 }
5961
5962 return NULL;
5963 }
5964
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* An opcode provides at most one default value that covers all of its
     optional operands; fetch it once up front.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default is the register number; the
       lane index is left as initialized.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scale defaults to "MUL #1" alongside the pattern.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* No immediate was parsed, so make sure no relocation is pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option-table operands: the default indexes the respective table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    /* Other operand types need no defaulting.  */
    default:
      break;
    }
}
6063
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates that the relocation written by the user is permitted for the
   opcode (MOVK) and register width, and derives the implicit shift
   amount (0/16/32/48) from the relocation's 16-bit group, storing it in
   operand 1's shifter.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* The destination register width decides which groups are valid.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The relocation types listed below are rejected for MOVK; they are
     only meaningful for MOVZ/MOVN.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* Group 0 relocations select bits [15:0]: no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* Group 1 relocations select bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* Group 2 relocations select bits [47:32]: 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* Group 3 relocations select bits [63:48]: 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6165
/* A primitive log calculator.  Return log2 of SIZE for the supported
   access sizes (1, 2, 4, 8 or 16 bytes); assert and return -1 (as an
   unsigned value) for anything else.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* Lookup table indexed by SIZE - 1; (unsigned char) -1 marks sizes
     that are not a supported power of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as over-large sizes; without the zero check
     the ls[size - 1] access below would index far out of bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6181
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The pseudo relocation (and its TLS DTPREL/TPREL variants) is mapped
   to a concrete LDST8/16/32/64/128 relocation according to the access
   size implied by operand 1's qualifier.  Returns BFD_RELOC_AARCH64_NONE
   if the qualifier's size is not representable by the relocation.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc type, in the same order as the BFD_RELOC_AARCH64_*
     pseudo codes; columns: log2 of the access size (0 => 1 byte, ...,
     4 => 16 bytes).  The TLS rows have no 128-bit variant.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 carries no qualifier yet, infer it from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS variants top out at 64-bit accesses (see the table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6269
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO packs the count minus one in bits [1:0] and up to four 5-bit
   register numbers above that, first register in bits [6:2].

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count = 1 + (reginfo & 0x3);
  uint32_t stride = accept_alternate ? 2 : 1;
  uint32_t expected_regno, n;

  reginfo >>= 2;
  expected_regno = reginfo & 0x1f;

  /* Walk the remaining registers, requiring each one to follow its
     predecessor by STRIDE, wrapping modulo 32.  */
  for (n = 1; n < count; n++)
    {
      reginfo >>= 5;
      expected_regno = (expected_regno + stride) & 0x1f;
      if ((reginfo & 0x1f) != expected_regno)
	return false;
    }

  return true;
}
6300
6301 /* Generic instruction operand parser. This does no encoding and no
6302 semantic validation; it merely squirrels values away in the inst
6303 structure. Returns TRUE or FALSE depending on whether the
6304 specified grammar matched. */
6305
6306 static bool
6307 parse_operands (char *str, const aarch64_opcode *opcode)
6308 {
6309 int i;
6310 char *backtrack_pos = 0;
6311 const enum aarch64_opnd *operands = opcode->operands;
6312 aarch64_reg_type imm_reg_type;
6313
6314 clear_error ();
6315 skip_whitespace (str);
6316
6317 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6318 AARCH64_FEATURE_SVE
6319 | AARCH64_FEATURE_SVE2))
6320 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6321 else
6322 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6323
6324 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6325 {
6326 int64_t val;
6327 const reg_entry *reg;
6328 int comma_skipped_p = 0;
6329 struct vector_type_el vectype;
6330 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6331 aarch64_opnd_info *info = &inst.base.operands[i];
6332 aarch64_reg_type reg_type;
6333
6334 DEBUG_TRACE ("parse operand %d", i);
6335
6336 /* Assign the operand code. */
6337 info->type = operands[i];
6338
6339 if (optional_operand_p (opcode, i))
6340 {
6341 /* Remember where we are in case we need to backtrack. */
6342 gas_assert (!backtrack_pos);
6343 backtrack_pos = str;
6344 }
6345
6346 /* Expect comma between operands; the backtrack mechanism will take
6347 care of cases of omitted optional operand. */
6348 if (i > 0 && ! skip_past_char (&str, ','))
6349 {
6350 set_syntax_error (_("comma expected between operands"));
6351 goto failure;
6352 }
6353 else
6354 comma_skipped_p = 1;
6355
6356 switch (operands[i])
6357 {
6358 case AARCH64_OPND_Rd:
6359 case AARCH64_OPND_Rn:
6360 case AARCH64_OPND_Rm:
6361 case AARCH64_OPND_Rt:
6362 case AARCH64_OPND_Rt2:
6363 case AARCH64_OPND_Rs:
6364 case AARCH64_OPND_Ra:
6365 case AARCH64_OPND_Rt_LS64:
6366 case AARCH64_OPND_Rt_SYS:
6367 case AARCH64_OPND_PAIRREG:
6368 case AARCH64_OPND_SVE_Rm:
6369 po_int_reg_or_fail (REG_TYPE_R_Z);
6370
6371 /* In LS64 load/store instructions Rt register number must be even
6372 and <=22. */
6373 if (operands[i] == AARCH64_OPND_Rt_LS64)
6374 {
6375 /* We've already checked if this is valid register.
6376 This will check if register number (Rt) is not undefined for LS64
6377 instructions:
6378 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6379 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6380 {
6381 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6382 goto failure;
6383 }
6384 }
6385 break;
6386
6387 case AARCH64_OPND_Rd_SP:
6388 case AARCH64_OPND_Rn_SP:
6389 case AARCH64_OPND_Rt_SP:
6390 case AARCH64_OPND_SVE_Rn_SP:
6391 case AARCH64_OPND_Rm_SP:
6392 po_int_reg_or_fail (REG_TYPE_R_SP);
6393 break;
6394
6395 case AARCH64_OPND_Rm_EXT:
6396 case AARCH64_OPND_Rm_SFT:
6397 po_misc_or_fail (parse_shifter_operand
6398 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6399 ? SHIFTED_ARITH_IMM
6400 : SHIFTED_LOGIC_IMM)));
6401 if (!info->shifter.operator_present)
6402 {
6403 /* Default to LSL if not present. Libopcodes prefers shifter
6404 kind to be explicit. */
6405 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6406 info->shifter.kind = AARCH64_MOD_LSL;
6407 /* For Rm_EXT, libopcodes will carry out further check on whether
6408 or not stack pointer is used in the instruction (Recall that
6409 "the extend operator is not optional unless at least one of
6410 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6411 }
6412 break;
6413
6414 case AARCH64_OPND_Fd:
6415 case AARCH64_OPND_Fn:
6416 case AARCH64_OPND_Fm:
6417 case AARCH64_OPND_Fa:
6418 case AARCH64_OPND_Ft:
6419 case AARCH64_OPND_Ft2:
6420 case AARCH64_OPND_Sd:
6421 case AARCH64_OPND_Sn:
6422 case AARCH64_OPND_Sm:
6423 case AARCH64_OPND_SVE_VZn:
6424 case AARCH64_OPND_SVE_Vd:
6425 case AARCH64_OPND_SVE_Vm:
6426 case AARCH64_OPND_SVE_Vn:
6427 reg = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, NULL);
6428 if (!reg)
6429 {
6430 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6431 goto failure;
6432 }
6433 gas_assert (reg->type >= REG_TYPE_FP_B
6434 && reg->type <= REG_TYPE_FP_Q);
6435
6436 info->reg.regno = reg->number;
6437 info->qualifier = AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
6438 break;
6439
6440 case AARCH64_OPND_SVE_Pd:
6441 case AARCH64_OPND_SVE_Pg3:
6442 case AARCH64_OPND_SVE_Pg4_5:
6443 case AARCH64_OPND_SVE_Pg4_10:
6444 case AARCH64_OPND_SVE_Pg4_16:
6445 case AARCH64_OPND_SVE_Pm:
6446 case AARCH64_OPND_SVE_Pn:
6447 case AARCH64_OPND_SVE_Pt:
6448 case AARCH64_OPND_SME_Pm:
6449 reg_type = REG_TYPE_PN;
6450 goto vector_reg;
6451
6452 case AARCH64_OPND_SVE_Za_5:
6453 case AARCH64_OPND_SVE_Za_16:
6454 case AARCH64_OPND_SVE_Zd:
6455 case AARCH64_OPND_SVE_Zm_5:
6456 case AARCH64_OPND_SVE_Zm_16:
6457 case AARCH64_OPND_SVE_Zn:
6458 case AARCH64_OPND_SVE_Zt:
6459 reg_type = REG_TYPE_ZN;
6460 goto vector_reg;
6461
6462 case AARCH64_OPND_Va:
6463 case AARCH64_OPND_Vd:
6464 case AARCH64_OPND_Vn:
6465 case AARCH64_OPND_Vm:
6466 reg_type = REG_TYPE_VN;
6467 vector_reg:
6468 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6469 if (!reg)
6470 {
6471 first_error (_(get_reg_expected_msg (reg_type)));
6472 goto failure;
6473 }
6474 if (vectype.defined & NTA_HASINDEX)
6475 goto failure;
6476
6477 info->reg.regno = reg->number;
6478 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6479 && vectype.type == NT_invtype)
6480 /* Unqualified Pn and Zn registers are allowed in certain
6481 contexts. Rely on F_STRICT qualifier checking to catch
6482 invalid uses. */
6483 info->qualifier = AARCH64_OPND_QLF_NIL;
6484 else
6485 {
6486 info->qualifier = vectype_to_qualifier (&vectype);
6487 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6488 goto failure;
6489 }
6490 break;
6491
6492 case AARCH64_OPND_VdD1:
6493 case AARCH64_OPND_VnD1:
6494 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6495 if (!reg)
6496 {
6497 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6498 goto failure;
6499 }
6500 if (vectype.type != NT_d || vectype.index != 1)
6501 {
6502 set_fatal_syntax_error
6503 (_("the top half of a 128-bit FP/SIMD register is expected"));
6504 goto failure;
6505 }
6506 info->reg.regno = reg->number;
6507 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6508 here; it is correct for the purpose of encoding/decoding since
6509 only the register number is explicitly encoded in the related
6510 instructions, although this appears a bit hacky. */
6511 info->qualifier = AARCH64_OPND_QLF_S_D;
6512 break;
6513
6514 case AARCH64_OPND_SVE_Zm3_INDEX:
6515 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6516 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6517 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6518 case AARCH64_OPND_SVE_Zm4_INDEX:
6519 case AARCH64_OPND_SVE_Zn_INDEX:
6520 reg_type = REG_TYPE_ZN;
6521 goto vector_reg_index;
6522
6523 case AARCH64_OPND_Ed:
6524 case AARCH64_OPND_En:
6525 case AARCH64_OPND_Em:
6526 case AARCH64_OPND_Em16:
6527 case AARCH64_OPND_SM3_IMM2:
6528 reg_type = REG_TYPE_VN;
6529 vector_reg_index:
6530 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6531 if (!reg)
6532 {
6533 first_error (_(get_reg_expected_msg (reg_type)));
6534 goto failure;
6535 }
6536 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6537 goto failure;
6538
6539 info->reglane.regno = reg->number;
6540 info->reglane.index = vectype.index;
6541 info->qualifier = vectype_to_qualifier (&vectype);
6542 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6543 goto failure;
6544 break;
6545
6546 case AARCH64_OPND_SVE_ZnxN:
6547 case AARCH64_OPND_SVE_ZtxN:
6548 reg_type = REG_TYPE_ZN;
6549 goto vector_reg_list;
6550
6551 case AARCH64_OPND_LVn:
6552 case AARCH64_OPND_LVt:
6553 case AARCH64_OPND_LVt_AL:
6554 case AARCH64_OPND_LEt:
6555 reg_type = REG_TYPE_VN;
6556 vector_reg_list:
6557 if (reg_type == REG_TYPE_ZN
6558 && get_opcode_dependent_value (opcode) == 1
6559 && *str != '{')
6560 {
6561 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6562 if (!reg)
6563 {
6564 first_error (_(get_reg_expected_msg (reg_type)));
6565 goto failure;
6566 }
6567 info->reglist.first_regno = reg->number;
6568 info->reglist.num_regs = 1;
6569 }
6570 else
6571 {
6572 val = parse_vector_reg_list (&str, reg_type, &vectype);
6573 if (val == PARSE_FAIL)
6574 goto failure;
6575
6576 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6577 {
6578 set_fatal_syntax_error (_("invalid register list"));
6579 goto failure;
6580 }
6581
6582 if (vectype.width != 0 && *str != ',')
6583 {
6584 set_fatal_syntax_error
6585 (_("expected element type rather than vector type"));
6586 goto failure;
6587 }
6588
6589 info->reglist.first_regno = (val >> 2) & 0x1f;
6590 info->reglist.num_regs = (val & 0x3) + 1;
6591 }
6592 if (operands[i] == AARCH64_OPND_LEt)
6593 {
6594 if (!(vectype.defined & NTA_HASINDEX))
6595 goto failure;
6596 info->reglist.has_index = 1;
6597 info->reglist.index = vectype.index;
6598 }
6599 else
6600 {
6601 if (vectype.defined & NTA_HASINDEX)
6602 goto failure;
6603 if (!(vectype.defined & NTA_HASTYPE))
6604 {
6605 if (reg_type == REG_TYPE_ZN)
6606 set_fatal_syntax_error (_("missing type suffix"));
6607 goto failure;
6608 }
6609 }
6610 info->qualifier = vectype_to_qualifier (&vectype);
6611 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6612 goto failure;
6613 break;
6614
6615 case AARCH64_OPND_CRn:
6616 case AARCH64_OPND_CRm:
6617 {
6618 char prefix = *(str++);
6619 if (prefix != 'c' && prefix != 'C')
6620 goto failure;
6621
6622 po_imm_nc_or_fail ();
6623 if (val > 15)
6624 {
6625 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6626 goto failure;
6627 }
6628 info->qualifier = AARCH64_OPND_QLF_CR;
6629 info->imm.value = val;
6630 break;
6631 }
6632
6633 case AARCH64_OPND_SHLL_IMM:
6634 case AARCH64_OPND_IMM_VLSR:
6635 po_imm_or_fail (1, 64);
6636 info->imm.value = val;
6637 break;
6638
6639 case AARCH64_OPND_CCMP_IMM:
6640 case AARCH64_OPND_SIMM5:
6641 case AARCH64_OPND_FBITS:
6642 case AARCH64_OPND_TME_UIMM16:
6643 case AARCH64_OPND_UIMM4:
6644 case AARCH64_OPND_UIMM4_ADDG:
6645 case AARCH64_OPND_UIMM10:
6646 case AARCH64_OPND_UIMM3_OP1:
6647 case AARCH64_OPND_UIMM3_OP2:
6648 case AARCH64_OPND_IMM_VLSL:
6649 case AARCH64_OPND_IMM:
6650 case AARCH64_OPND_IMM_2:
6651 case AARCH64_OPND_WIDTH:
6652 case AARCH64_OPND_SVE_INV_LIMM:
6653 case AARCH64_OPND_SVE_LIMM:
6654 case AARCH64_OPND_SVE_LIMM_MOV:
6655 case AARCH64_OPND_SVE_SHLIMM_PRED:
6656 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6657 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6658 case AARCH64_OPND_SVE_SHRIMM_PRED:
6659 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6660 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6661 case AARCH64_OPND_SVE_SIMM5:
6662 case AARCH64_OPND_SVE_SIMM5B:
6663 case AARCH64_OPND_SVE_SIMM6:
6664 case AARCH64_OPND_SVE_SIMM8:
6665 case AARCH64_OPND_SVE_UIMM3:
6666 case AARCH64_OPND_SVE_UIMM7:
6667 case AARCH64_OPND_SVE_UIMM8:
6668 case AARCH64_OPND_SVE_UIMM8_53:
6669 case AARCH64_OPND_IMM_ROT1:
6670 case AARCH64_OPND_IMM_ROT2:
6671 case AARCH64_OPND_IMM_ROT3:
6672 case AARCH64_OPND_SVE_IMM_ROT1:
6673 case AARCH64_OPND_SVE_IMM_ROT2:
6674 case AARCH64_OPND_SVE_IMM_ROT3:
6675 case AARCH64_OPND_CSSC_SIMM8:
6676 case AARCH64_OPND_CSSC_UIMM8:
6677 po_imm_nc_or_fail ();
6678 info->imm.value = val;
6679 break;
6680
6681 case AARCH64_OPND_SVE_AIMM:
6682 case AARCH64_OPND_SVE_ASIMM:
6683 po_imm_nc_or_fail ();
6684 info->imm.value = val;
6685 skip_whitespace (str);
6686 if (skip_past_comma (&str))
6687 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6688 else
6689 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6690 break;
6691
6692 case AARCH64_OPND_SVE_PATTERN:
6693 po_enum_or_fail (aarch64_sve_pattern_array);
6694 info->imm.value = val;
6695 break;
6696
6697 case AARCH64_OPND_SVE_PATTERN_SCALED:
6698 po_enum_or_fail (aarch64_sve_pattern_array);
6699 info->imm.value = val;
6700 if (skip_past_comma (&str)
6701 && !parse_shift (&str, info, SHIFTED_MUL))
6702 goto failure;
6703 if (!info->shifter.operator_present)
6704 {
6705 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6706 info->shifter.kind = AARCH64_MOD_MUL;
6707 info->shifter.amount = 1;
6708 }
6709 break;
6710
6711 case AARCH64_OPND_SVE_PRFOP:
6712 po_enum_or_fail (aarch64_sve_prfop_array);
6713 info->imm.value = val;
6714 break;
6715
6716 case AARCH64_OPND_UIMM7:
6717 po_imm_or_fail (0, 127);
6718 info->imm.value = val;
6719 break;
6720
6721 case AARCH64_OPND_IDX:
6722 case AARCH64_OPND_MASK:
6723 case AARCH64_OPND_BIT_NUM:
6724 case AARCH64_OPND_IMMR:
6725 case AARCH64_OPND_IMMS:
6726 po_imm_or_fail (0, 63);
6727 info->imm.value = val;
6728 break;
6729
6730 case AARCH64_OPND_IMM0:
6731 po_imm_nc_or_fail ();
6732 if (val != 0)
6733 {
6734 set_fatal_syntax_error (_("immediate zero expected"));
6735 goto failure;
6736 }
6737 info->imm.value = 0;
6738 break;
6739
6740 case AARCH64_OPND_FPIMM0:
6741 {
6742 int qfloat;
6743 bool res1 = false, res2 = false;
6744 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6745 it is probably not worth the effort to support it. */
6746 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6747 imm_reg_type))
6748 && (error_p ()
6749 || !(res2 = parse_constant_immediate (&str, &val,
6750 imm_reg_type))))
6751 goto failure;
6752 if ((res1 && qfloat == 0) || (res2 && val == 0))
6753 {
6754 info->imm.value = 0;
6755 info->imm.is_fp = 1;
6756 break;
6757 }
6758 set_fatal_syntax_error (_("immediate zero expected"));
6759 goto failure;
6760 }
6761
6762 case AARCH64_OPND_IMM_MOV:
6763 {
6764 char *saved = str;
6765 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6766 reg_name_p (str, REG_TYPE_VN))
6767 goto failure;
6768 str = saved;
6769 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6770 GE_OPT_PREFIX, REJECT_ABSENT));
6771 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6772 later. fix_mov_imm_insn will try to determine a machine
6773 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6774 message if the immediate cannot be moved by a single
6775 instruction. */
6776 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6777 inst.base.operands[i].skip = 1;
6778 }
6779 break;
6780
6781 case AARCH64_OPND_SIMD_IMM:
6782 case AARCH64_OPND_SIMD_IMM_SFT:
6783 if (! parse_big_immediate (&str, &val, imm_reg_type))
6784 goto failure;
6785 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6786 /* addr_off_p */ 0,
6787 /* need_libopcodes_p */ 1,
6788 /* skip_p */ 1);
6789 /* Parse shift.
6790 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6791 shift, we don't check it here; we leave the checking to
6792 the libopcodes (operand_general_constraint_met_p). By
6793 doing this, we achieve better diagnostics. */
6794 if (skip_past_comma (&str)
6795 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6796 goto failure;
6797 if (!info->shifter.operator_present
6798 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6799 {
6800 /* Default to LSL if not present. Libopcodes prefers shifter
6801 kind to be explicit. */
6802 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6803 info->shifter.kind = AARCH64_MOD_LSL;
6804 }
6805 break;
6806
6807 case AARCH64_OPND_FPIMM:
6808 case AARCH64_OPND_SIMD_FPIMM:
6809 case AARCH64_OPND_SVE_FPIMM8:
6810 {
6811 int qfloat;
6812 bool dp_p;
6813
6814 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6815 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6816 || !aarch64_imm_float_p (qfloat))
6817 {
6818 if (!error_p ())
6819 set_fatal_syntax_error (_("invalid floating-point"
6820 " constant"));
6821 goto failure;
6822 }
6823 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6824 inst.base.operands[i].imm.is_fp = 1;
6825 }
6826 break;
6827
6828 case AARCH64_OPND_SVE_I1_HALF_ONE:
6829 case AARCH64_OPND_SVE_I1_HALF_TWO:
6830 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6831 {
6832 int qfloat;
6833 bool dp_p;
6834
6835 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6836 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6837 {
6838 if (!error_p ())
6839 set_fatal_syntax_error (_("invalid floating-point"
6840 " constant"));
6841 goto failure;
6842 }
6843 inst.base.operands[i].imm.value = qfloat;
6844 inst.base.operands[i].imm.is_fp = 1;
6845 }
6846 break;
6847
6848 case AARCH64_OPND_LIMM:
6849 po_misc_or_fail (parse_shifter_operand (&str, info,
6850 SHIFTED_LOGIC_IMM));
6851 if (info->shifter.operator_present)
6852 {
6853 set_fatal_syntax_error
6854 (_("shift not allowed for bitmask immediate"));
6855 goto failure;
6856 }
6857 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6858 /* addr_off_p */ 0,
6859 /* need_libopcodes_p */ 1,
6860 /* skip_p */ 1);
6861 break;
6862
6863 case AARCH64_OPND_AIMM:
6864 if (opcode->op == OP_ADD)
6865 /* ADD may have relocation types. */
6866 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6867 SHIFTED_ARITH_IMM));
6868 else
6869 po_misc_or_fail (parse_shifter_operand (&str, info,
6870 SHIFTED_ARITH_IMM));
6871 switch (inst.reloc.type)
6872 {
6873 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6874 info->shifter.amount = 12;
6875 break;
6876 case BFD_RELOC_UNUSED:
6877 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6878 if (info->shifter.kind != AARCH64_MOD_NONE)
6879 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6880 inst.reloc.pc_rel = 0;
6881 break;
6882 default:
6883 break;
6884 }
6885 info->imm.value = 0;
6886 if (!info->shifter.operator_present)
6887 {
6888 /* Default to LSL if not present. Libopcodes prefers shifter
6889 kind to be explicit. */
6890 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6891 info->shifter.kind = AARCH64_MOD_LSL;
6892 }
6893 break;
6894
6895 case AARCH64_OPND_HALF:
6896 {
6897 /* #<imm16> or relocation. */
6898 int internal_fixup_p;
6899 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6900 if (internal_fixup_p)
6901 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6902 skip_whitespace (str);
6903 if (skip_past_comma (&str))
6904 {
6905 /* {, LSL #<shift>} */
6906 if (! aarch64_gas_internal_fixup_p ())
6907 {
6908 set_fatal_syntax_error (_("can't mix relocation modifier "
6909 "with explicit shift"));
6910 goto failure;
6911 }
6912 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6913 }
6914 else
6915 inst.base.operands[i].shifter.amount = 0;
6916 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6917 inst.base.operands[i].imm.value = 0;
6918 if (! process_movw_reloc_info ())
6919 goto failure;
6920 }
6921 break;
6922
6923 case AARCH64_OPND_EXCEPTION:
6924 case AARCH64_OPND_UNDEFINED:
6925 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6926 imm_reg_type));
6927 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6928 /* addr_off_p */ 0,
6929 /* need_libopcodes_p */ 0,
6930 /* skip_p */ 1);
6931 break;
6932
6933 case AARCH64_OPND_NZCV:
6934 {
6935 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6936 if (nzcv != NULL)
6937 {
6938 str += 4;
6939 info->imm.value = nzcv->value;
6940 break;
6941 }
6942 po_imm_or_fail (0, 15);
6943 info->imm.value = val;
6944 }
6945 break;
6946
6947 case AARCH64_OPND_COND:
6948 case AARCH64_OPND_COND1:
6949 {
6950 char *start = str;
6951 do
6952 str++;
6953 while (ISALPHA (*str));
6954 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6955 if (info->cond == NULL)
6956 {
6957 set_syntax_error (_("invalid condition"));
6958 goto failure;
6959 }
6960 else if (operands[i] == AARCH64_OPND_COND1
6961 && (info->cond->value & 0xe) == 0xe)
6962 {
6963 /* Do not allow AL or NV. */
6964 set_default_error ();
6965 goto failure;
6966 }
6967 }
6968 break;
6969
6970 case AARCH64_OPND_ADDR_ADRP:
6971 po_misc_or_fail (parse_adrp (&str));
6972 /* Clear the value as operand needs to be relocated. */
6973 info->imm.value = 0;
6974 break;
6975
6976 case AARCH64_OPND_ADDR_PCREL14:
6977 case AARCH64_OPND_ADDR_PCREL19:
6978 case AARCH64_OPND_ADDR_PCREL21:
6979 case AARCH64_OPND_ADDR_PCREL26:
6980 po_misc_or_fail (parse_address (&str, info));
6981 if (!info->addr.pcrel)
6982 {
6983 set_syntax_error (_("invalid pc-relative address"));
6984 goto failure;
6985 }
6986 if (inst.gen_lit_pool
6987 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6988 {
6989 /* Only permit "=value" in the literal load instructions.
6990 The literal will be generated by programmer_friendly_fixup. */
6991 set_syntax_error (_("invalid use of \"=immediate\""));
6992 goto failure;
6993 }
6994 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6995 {
6996 set_syntax_error (_("unrecognized relocation suffix"));
6997 goto failure;
6998 }
6999 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7000 {
7001 info->imm.value = inst.reloc.exp.X_add_number;
7002 inst.reloc.type = BFD_RELOC_UNUSED;
7003 }
7004 else
7005 {
7006 info->imm.value = 0;
7007 if (inst.reloc.type == BFD_RELOC_UNUSED)
7008 switch (opcode->iclass)
7009 {
7010 case compbranch:
7011 case condbranch:
7012 /* e.g. CBZ or B.COND */
7013 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7014 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7015 break;
7016 case testbranch:
7017 /* e.g. TBZ */
7018 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7019 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7020 break;
7021 case branch_imm:
7022 /* e.g. B or BL */
7023 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7024 inst.reloc.type =
7025 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7026 : BFD_RELOC_AARCH64_JUMP26;
7027 break;
7028 case loadlit:
7029 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7030 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7031 break;
7032 case pcreladdr:
7033 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7034 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7035 break;
7036 default:
7037 gas_assert (0);
7038 abort ();
7039 }
7040 inst.reloc.pc_rel = 1;
7041 }
7042 break;
7043
7044 case AARCH64_OPND_ADDR_SIMPLE:
7045 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7046 {
7047 /* [<Xn|SP>{, #<simm>}] */
7048 char *start = str;
7049 /* First use the normal address-parsing routines, to get
7050 the usual syntax errors. */
7051 po_misc_or_fail (parse_address (&str, info));
7052 if (info->addr.pcrel || info->addr.offset.is_reg
7053 || !info->addr.preind || info->addr.postind
7054 || info->addr.writeback)
7055 {
7056 set_syntax_error (_("invalid addressing mode"));
7057 goto failure;
7058 }
7059
7060 /* Then retry, matching the specific syntax of these addresses. */
7061 str = start;
7062 po_char_or_fail ('[');
7063 po_reg_or_fail (REG_TYPE_R64_SP);
7064 /* Accept optional ", #0". */
7065 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7066 && skip_past_char (&str, ','))
7067 {
7068 skip_past_char (&str, '#');
7069 if (! skip_past_char (&str, '0'))
7070 {
7071 set_fatal_syntax_error
7072 (_("the optional immediate offset can only be 0"));
7073 goto failure;
7074 }
7075 }
7076 po_char_or_fail (']');
7077 break;
7078 }
7079
7080 case AARCH64_OPND_ADDR_REGOFF:
7081 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7082 po_misc_or_fail (parse_address (&str, info));
7083 regoff_addr:
7084 if (info->addr.pcrel || !info->addr.offset.is_reg
7085 || !info->addr.preind || info->addr.postind
7086 || info->addr.writeback)
7087 {
7088 set_syntax_error (_("invalid addressing mode"));
7089 goto failure;
7090 }
7091 if (!info->shifter.operator_present)
7092 {
7093 /* Default to LSL if not present. Libopcodes prefers shifter
7094 kind to be explicit. */
7095 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7096 info->shifter.kind = AARCH64_MOD_LSL;
7097 }
7098 /* Qualifier to be deduced by libopcodes. */
7099 break;
7100
7101 case AARCH64_OPND_ADDR_SIMM7:
7102 po_misc_or_fail (parse_address (&str, info));
7103 if (info->addr.pcrel || info->addr.offset.is_reg
7104 || (!info->addr.preind && !info->addr.postind))
7105 {
7106 set_syntax_error (_("invalid addressing mode"));
7107 goto failure;
7108 }
7109 if (inst.reloc.type != BFD_RELOC_UNUSED)
7110 {
7111 set_syntax_error (_("relocation not allowed"));
7112 goto failure;
7113 }
7114 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7115 /* addr_off_p */ 1,
7116 /* need_libopcodes_p */ 1,
7117 /* skip_p */ 0);
7118 break;
7119
7120 case AARCH64_OPND_ADDR_SIMM9:
7121 case AARCH64_OPND_ADDR_SIMM9_2:
7122 case AARCH64_OPND_ADDR_SIMM11:
7123 case AARCH64_OPND_ADDR_SIMM13:
7124 po_misc_or_fail (parse_address (&str, info));
7125 if (info->addr.pcrel || info->addr.offset.is_reg
7126 || (!info->addr.preind && !info->addr.postind)
7127 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7128 && info->addr.writeback))
7129 {
7130 set_syntax_error (_("invalid addressing mode"));
7131 goto failure;
7132 }
7133 if (inst.reloc.type != BFD_RELOC_UNUSED)
7134 {
7135 set_syntax_error (_("relocation not allowed"));
7136 goto failure;
7137 }
7138 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7139 /* addr_off_p */ 1,
7140 /* need_libopcodes_p */ 1,
7141 /* skip_p */ 0);
7142 break;
7143
7144 case AARCH64_OPND_ADDR_SIMM10:
7145 case AARCH64_OPND_ADDR_OFFSET:
7146 po_misc_or_fail (parse_address (&str, info));
7147 if (info->addr.pcrel || info->addr.offset.is_reg
7148 || !info->addr.preind || info->addr.postind)
7149 {
7150 set_syntax_error (_("invalid addressing mode"));
7151 goto failure;
7152 }
7153 if (inst.reloc.type != BFD_RELOC_UNUSED)
7154 {
7155 set_syntax_error (_("relocation not allowed"));
7156 goto failure;
7157 }
7158 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7159 /* addr_off_p */ 1,
7160 /* need_libopcodes_p */ 1,
7161 /* skip_p */ 0);
7162 break;
7163
7164 case AARCH64_OPND_ADDR_UIMM12:
7165 po_misc_or_fail (parse_address (&str, info));
7166 if (info->addr.pcrel || info->addr.offset.is_reg
7167 || !info->addr.preind || info->addr.writeback)
7168 {
7169 set_syntax_error (_("invalid addressing mode"));
7170 goto failure;
7171 }
7172 if (inst.reloc.type == BFD_RELOC_UNUSED)
7173 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7174 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7175 || (inst.reloc.type
7176 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7177 || (inst.reloc.type
7178 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7179 || (inst.reloc.type
7180 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7181 || (inst.reloc.type
7182 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7183 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7184 /* Leave qualifier to be determined by libopcodes. */
7185 break;
7186
7187 case AARCH64_OPND_SIMD_ADDR_POST:
7188 /* [<Xn|SP>], <Xm|#<amount>> */
7189 po_misc_or_fail (parse_address (&str, info));
7190 if (!info->addr.postind || !info->addr.writeback)
7191 {
7192 set_syntax_error (_("invalid addressing mode"));
7193 goto failure;
7194 }
7195 if (!info->addr.offset.is_reg)
7196 {
7197 if (inst.reloc.exp.X_op == O_constant)
7198 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7199 else
7200 {
7201 set_fatal_syntax_error
7202 (_("writeback value must be an immediate constant"));
7203 goto failure;
7204 }
7205 }
7206 /* No qualifier. */
7207 break;
7208
7209 case AARCH64_OPND_SME_SM_ZA:
7210 /* { SM | ZA } */
7211 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7212 {
7213 set_syntax_error (_("unknown or missing PSTATE field name"));
7214 goto failure;
7215 }
7216 info->reg.regno = val;
7217 break;
7218
7219 case AARCH64_OPND_SME_PnT_Wm_imm:
7220 /* <Pn>.<T>[<Wm>, #<imm>] */
7221 {
7222 int index_base_reg;
7223 int imm;
7224 val = parse_sme_pred_reg_with_index (&str,
7225 &index_base_reg,
7226 &imm,
7227 &qualifier);
7228 if (val == PARSE_FAIL)
7229 goto failure;
7230
7231 info->za_tile_vector.regno = val;
7232 info->za_tile_vector.index.regno = index_base_reg;
7233 info->za_tile_vector.index.imm = imm;
7234 info->qualifier = qualifier;
7235 break;
7236 }
7237
7238 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7239 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7240 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7241 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7242 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7243 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7244 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7245 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7246 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7247 case AARCH64_OPND_SVE_ADDR_RI_U6:
7248 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7249 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7250 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7251 /* [X<n>{, #imm, MUL VL}]
7252 [X<n>{, #imm}]
7253 but recognizing SVE registers. */
7254 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7255 &offset_qualifier));
7256 if (base_qualifier != AARCH64_OPND_QLF_X)
7257 {
7258 set_syntax_error (_("invalid addressing mode"));
7259 goto failure;
7260 }
7261 sve_regimm:
7262 if (info->addr.pcrel || info->addr.offset.is_reg
7263 || !info->addr.preind || info->addr.writeback)
7264 {
7265 set_syntax_error (_("invalid addressing mode"));
7266 goto failure;
7267 }
7268 if (inst.reloc.type != BFD_RELOC_UNUSED
7269 || inst.reloc.exp.X_op != O_constant)
7270 {
7271 /* Make sure this has priority over
7272 "invalid addressing mode". */
7273 set_fatal_syntax_error (_("constant offset required"));
7274 goto failure;
7275 }
7276 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7277 break;
7278
7279 case AARCH64_OPND_SVE_ADDR_R:
7280 /* [<Xn|SP>{, <R><m>}]
7281 but recognizing SVE registers. */
7282 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7283 &offset_qualifier));
7284 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7285 {
7286 offset_qualifier = AARCH64_OPND_QLF_X;
7287 info->addr.offset.is_reg = 1;
7288 info->addr.offset.regno = 31;
7289 }
7290 else if (base_qualifier != AARCH64_OPND_QLF_X
7291 || offset_qualifier != AARCH64_OPND_QLF_X)
7292 {
7293 set_syntax_error (_("invalid addressing mode"));
7294 goto failure;
7295 }
7296 goto regoff_addr;
7297
7298 case AARCH64_OPND_SVE_ADDR_RR:
7299 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7300 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7301 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7302 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7303 case AARCH64_OPND_SVE_ADDR_RX:
7304 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7305 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7306 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7307 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7308 but recognizing SVE registers. */
7309 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7310 &offset_qualifier));
7311 if (base_qualifier != AARCH64_OPND_QLF_X
7312 || offset_qualifier != AARCH64_OPND_QLF_X)
7313 {
7314 set_syntax_error (_("invalid addressing mode"));
7315 goto failure;
7316 }
7317 goto regoff_addr;
7318
7319 case AARCH64_OPND_SVE_ADDR_RZ:
7320 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7321 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7322 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7323 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7324 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7325 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7326 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7327 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7328 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7329 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7330 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7331 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7332 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7333 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7334 &offset_qualifier));
7335 if (base_qualifier != AARCH64_OPND_QLF_X
7336 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7337 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7338 {
7339 set_syntax_error (_("invalid addressing mode"));
7340 goto failure;
7341 }
7342 info->qualifier = offset_qualifier;
7343 goto regoff_addr;
7344
7345 case AARCH64_OPND_SVE_ADDR_ZX:
7346 /* [Zn.<T>{, <Xm>}]. */
7347 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7348 &offset_qualifier));
7349 /* Things to check:
7350 base_qualifier either S_S or S_D
7351 offset_qualifier must be X
7352 */
7353 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7354 && base_qualifier != AARCH64_OPND_QLF_S_D)
7355 || offset_qualifier != AARCH64_OPND_QLF_X)
7356 {
7357 set_syntax_error (_("invalid addressing mode"));
7358 goto failure;
7359 }
7360 info->qualifier = base_qualifier;
7361 if (!info->addr.offset.is_reg || info->addr.pcrel
7362 || !info->addr.preind || info->addr.writeback
7363 || info->shifter.operator_present != 0)
7364 {
7365 set_syntax_error (_("invalid addressing mode"));
7366 goto failure;
7367 }
7368 info->shifter.kind = AARCH64_MOD_LSL;
7369 break;
7370
7371
7372 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7373 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7374 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7375 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7376 /* [Z<n>.<T>{, #imm}] */
7377 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7378 &offset_qualifier));
7379 if (base_qualifier != AARCH64_OPND_QLF_S_S
7380 && base_qualifier != AARCH64_OPND_QLF_S_D)
7381 {
7382 set_syntax_error (_("invalid addressing mode"));
7383 goto failure;
7384 }
7385 info->qualifier = base_qualifier;
7386 goto sve_regimm;
7387
7388 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7389 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7390 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7391 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7392 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7393
7394 We don't reject:
7395
7396 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7397
7398 here since we get better error messages by leaving it to
7399 the qualifier checking routines. */
7400 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7401 &offset_qualifier));
7402 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7403 && base_qualifier != AARCH64_OPND_QLF_S_D)
7404 || offset_qualifier != base_qualifier)
7405 {
7406 set_syntax_error (_("invalid addressing mode"));
7407 goto failure;
7408 }
7409 info->qualifier = base_qualifier;
7410 goto regoff_addr;
7411
7412 case AARCH64_OPND_SYSREG:
7413 {
7414 uint32_t sysreg_flags;
7415 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7416 &sysreg_flags)) == PARSE_FAIL)
7417 {
7418 set_syntax_error (_("unknown or missing system register name"));
7419 goto failure;
7420 }
7421 inst.base.operands[i].sysreg.value = val;
7422 inst.base.operands[i].sysreg.flags = sysreg_flags;
7423 break;
7424 }
7425
7426 case AARCH64_OPND_PSTATEFIELD:
7427 {
7428 uint32_t sysreg_flags;
7429 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7430 &sysreg_flags)) == PARSE_FAIL)
7431 {
7432 set_syntax_error (_("unknown or missing PSTATE field name"));
7433 goto failure;
7434 }
7435 inst.base.operands[i].pstatefield = val;
7436 inst.base.operands[i].sysreg.flags = sysreg_flags;
7437 break;
7438 }
7439
7440 case AARCH64_OPND_SYSREG_IC:
7441 inst.base.operands[i].sysins_op =
7442 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7443 goto sys_reg_ins;
7444
7445 case AARCH64_OPND_SYSREG_DC:
7446 inst.base.operands[i].sysins_op =
7447 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7448 goto sys_reg_ins;
7449
7450 case AARCH64_OPND_SYSREG_AT:
7451 inst.base.operands[i].sysins_op =
7452 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7453 goto sys_reg_ins;
7454
7455 case AARCH64_OPND_SYSREG_SR:
7456 inst.base.operands[i].sysins_op =
7457 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7458 goto sys_reg_ins;
7459
7460 case AARCH64_OPND_SYSREG_TLBI:
7461 inst.base.operands[i].sysins_op =
7462 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7463 sys_reg_ins:
7464 if (inst.base.operands[i].sysins_op == NULL)
7465 {
7466 set_fatal_syntax_error ( _("unknown or missing operation name"));
7467 goto failure;
7468 }
7469 break;
7470
7471 case AARCH64_OPND_BARRIER:
7472 case AARCH64_OPND_BARRIER_ISB:
7473 val = parse_barrier (&str);
7474 if (val != PARSE_FAIL
7475 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7476 {
7477 /* ISB only accepts options name 'sy'. */
7478 set_syntax_error
7479 (_("the specified option is not accepted in ISB"));
7480 /* Turn off backtrack as this optional operand is present. */
7481 backtrack_pos = 0;
7482 goto failure;
7483 }
7484 if (val != PARSE_FAIL
7485 && operands[i] == AARCH64_OPND_BARRIER)
7486 {
7487 /* Regular barriers accept options CRm (C0-C15).
7488 DSB nXS barrier variant accepts values > 15. */
7489 if (val < 0 || val > 15)
7490 {
7491 set_syntax_error (_("the specified option is not accepted in DSB"));
7492 goto failure;
7493 }
7494 }
7495 /* This is an extension to accept a 0..15 immediate. */
7496 if (val == PARSE_FAIL)
7497 po_imm_or_fail (0, 15);
7498 info->barrier = aarch64_barrier_options + val;
7499 break;
7500
7501 case AARCH64_OPND_BARRIER_DSB_NXS:
7502 val = parse_barrier (&str);
7503 if (val != PARSE_FAIL)
7504 {
7505 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7506 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7507 {
7508 set_syntax_error (_("the specified option is not accepted in DSB"));
7509 /* Turn off backtrack as this optional operand is present. */
7510 backtrack_pos = 0;
7511 goto failure;
7512 }
7513 }
7514 else
7515 {
7516 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7517 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7518 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7519 goto failure;
7520 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7521 {
7522 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7523 goto failure;
7524 }
7525 }
7526 /* Option index is encoded as 2-bit value in val<3:2>. */
7527 val = (val >> 2) - 4;
7528 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7529 break;
7530
7531 case AARCH64_OPND_PRFOP:
7532 val = parse_pldop (&str);
7533 /* This is an extension to accept a 0..31 immediate. */
7534 if (val == PARSE_FAIL)
7535 po_imm_or_fail (0, 31);
7536 inst.base.operands[i].prfop = aarch64_prfops + val;
7537 break;
7538
7539 case AARCH64_OPND_BARRIER_PSB:
7540 val = parse_barrier_psb (&str, &(info->hint_option));
7541 if (val == PARSE_FAIL)
7542 goto failure;
7543 break;
7544
7545 case AARCH64_OPND_BTI_TARGET:
7546 val = parse_bti_operand (&str, &(info->hint_option));
7547 if (val == PARSE_FAIL)
7548 goto failure;
7549 break;
7550
7551 case AARCH64_OPND_SME_ZAda_2b:
7552 case AARCH64_OPND_SME_ZAda_3b:
7553 val = parse_sme_zada_operand (&str, &qualifier);
7554 if (val == PARSE_FAIL)
7555 goto failure;
7556 info->reg.regno = val;
7557 info->qualifier = qualifier;
7558 break;
7559
7560 case AARCH64_OPND_SME_ZA_HV_idx_src:
7561 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7562 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7563 {
7564 enum sme_hv_slice slice_indicator;
7565 int vector_select_register;
7566 int imm;
7567
7568 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7569 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7570 &slice_indicator,
7571 &vector_select_register,
7572 &imm,
7573 &qualifier);
7574 else
7575 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7576 &vector_select_register,
7577 &imm,
7578 &qualifier);
7579 if (val == PARSE_FAIL)
7580 goto failure;
7581 info->za_tile_vector.regno = val;
7582 info->za_tile_vector.index.regno = vector_select_register;
7583 info->za_tile_vector.index.imm = imm;
7584 info->za_tile_vector.v = slice_indicator;
7585 info->qualifier = qualifier;
7586 break;
7587 }
7588
7589 case AARCH64_OPND_SME_list_of_64bit_tiles:
7590 val = parse_sme_list_of_64bit_tiles (&str);
7591 if (val == PARSE_FAIL)
7592 goto failure;
7593 info->imm.value = val;
7594 break;
7595
7596 case AARCH64_OPND_SME_ZA_array:
7597 {
7598 int imm;
7599 val = parse_sme_za_array (&str, &imm);
7600 if (val == PARSE_FAIL)
7601 goto failure;
7602 info->za_tile_vector.index.regno = val;
7603 info->za_tile_vector.index.imm = imm;
7604 break;
7605 }
7606
7607 case AARCH64_OPND_MOPS_ADDR_Rd:
7608 case AARCH64_OPND_MOPS_ADDR_Rs:
7609 po_char_or_fail ('[');
7610 if (!parse_x0_to_x30 (&str, info))
7611 goto failure;
7612 po_char_or_fail (']');
7613 po_char_or_fail ('!');
7614 break;
7615
7616 case AARCH64_OPND_MOPS_WB_Rn:
7617 if (!parse_x0_to_x30 (&str, info))
7618 goto failure;
7619 po_char_or_fail ('!');
7620 break;
7621
7622 default:
7623 as_fatal (_("unhandled operand code %d"), operands[i]);
7624 }
7625
7626 /* If we get here, this operand was successfully parsed. */
7627 inst.base.operands[i].present = 1;
7628 continue;
7629
7630 failure:
7631 /* The parse routine should already have set the error, but in case
7632 not, set a default one here. */
7633 if (! error_p ())
7634 set_default_error ();
7635
7636 if (! backtrack_pos)
7637 goto parse_operands_return;
7638
7639 {
7640 /* We reach here because this operand is marked as optional, and
7641 either no operand was supplied or the operand was supplied but it
7642 was syntactically incorrect. In the latter case we report an
7643 error. In the former case we perform a few more checks before
7644 dropping through to the code to insert the default operand. */
7645
7646 char *tmp = backtrack_pos;
7647 char endchar = END_OF_INSN;
7648
7649 if (i != (aarch64_num_of_operands (opcode) - 1))
7650 endchar = ',';
7651 skip_past_char (&tmp, ',');
7652
7653 if (*tmp != endchar)
7654 /* The user has supplied an operand in the wrong format. */
7655 goto parse_operands_return;
7656
7657 /* Make sure there is not a comma before the optional operand.
7658 For example the fifth operand of 'sys' is optional:
7659
7660 sys #0,c0,c0,#0, <--- wrong
7661 sys #0,c0,c0,#0 <--- correct. */
7662 if (comma_skipped_p && i && endchar == END_OF_INSN)
7663 {
7664 set_fatal_syntax_error
7665 (_("unexpected comma before the omitted optional operand"));
7666 goto parse_operands_return;
7667 }
7668 }
7669
7670 /* Reaching here means we are dealing with an optional operand that is
7671 omitted from the assembly line. */
7672 gas_assert (optional_operand_p (opcode, i));
7673 info->present = 0;
7674 process_omitted_operand (operands[i], opcode, i, info);
7675
7676 /* Try again, skipping the optional operand at backtrack_pos. */
7677 str = backtrack_pos;
7678 backtrack_pos = 0;
7679
7680 /* Clear any error record after the omitted optional operand has been
7681 successfully handled. */
7682 clear_error ();
7683 }
7684
7685 /* Check if we have parsed all the operands. */
7686 if (*str != '\0' && ! error_p ())
7687 {
7688 /* Set I to the index of the last present operand; this is
7689 for the purpose of diagnostics. */
7690 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7691 ;
7692 set_fatal_syntax_error
7693 (_("unexpected characters following instruction"));
7694 }
7695
7696 parse_operands_return:
7697
7698 if (error_p ())
7699 {
7700 inst.parsing_error.index = i;
7701 DEBUG_TRACE ("parsing FAIL: %s - %s",
7702 operand_mismatch_kind_names[inst.parsing_error.kind],
7703 inst.parsing_error.error);
7704 /* Record the operand error properly; this is useful when there
7705 are multiple instruction templates for a mnemonic name, so that
7706 later on, we can select the error that most closely describes
7707 the problem. */
7708 record_operand_error_info (opcode, &inst.parsing_error);
7709 return false;
7710 }
7711 else
7712 {
7713 DEBUG_TRACE ("parsing SUCCESS");
7714 return true;
7715 }
7716 }
7717
/* Apply programmer-friendly fix-ups to *INSTR while keeping libopcodes
   happy, i.e. libopcodes only accepts the preferred architectural syntax,
   so accepted convenience forms are rewritten to that syntax here.
   Return FALSE if there is any failure (after recording an operand
   error); otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register can only name bits 0-31; reject larger bit
	     numbers, otherwise promote the qualifier to X form.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the X destination.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* The pool entry must be a link-time constant.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7825
/* Check for loads and stores that will cause unpredictable behavior.
   Emits as_warn diagnostics only; never rejects the instruction.
   STR is the original assembly text, used in the warning messages.
   NOTE(review): the (1 << 22) and (1 << 21) tests rely on the encoding
   layout of these instruction classes (load bit / pair-exclusive bit)
   -- confirm against the opcode tables before changing.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /*  Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /*  Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /*  Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7921
7922 static void
7923 force_automatic_sequence_close (void)
7924 {
7925 struct aarch64_segment_info_type *tc_seg_info;
7926
7927 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7928 if (tc_seg_info->insn_sequence.instr)
7929 {
7930 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7931 _("previous `%s' sequence has not been closed"),
7932 tc_seg_info->insn_sequence.instr->opcode->name);
7933 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7934 }
7935 }
7936
7937 /* A wrapper function to interface with libopcodes on encoding and
7938 record the error message if there is any.
7939
7940 Return TRUE on success; otherwise return FALSE. */
7941
7942 static bool
7943 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7944 aarch64_insn *code)
7945 {
7946 aarch64_operand_error error_info;
7947 memset (&error_info, '\0', sizeof (error_info));
7948 error_info.kind = AARCH64_OPDE_NIL;
7949 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7950 && !error_info.non_fatal)
7951 return true;
7952
7953 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7954 record_operand_error_info (opcode, &error_info);
7955 return error_info.non_fatal;
7956 }
7957
#ifdef DEBUG_AARCH64
/* Print OPCODE's operand list to the debug log, preferring each
   operand's short name and falling back to its description when the
   name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7973
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  Each candidate opcode template for
   the mnemonic is tried in turn until one parses and encodes.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' so a condition
     suffix (e.g. b.eq) can be split off by opcode_lookup.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A line like "foo .req x0" defines a register alias, not an insn.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition suffix across the per-template reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state and try the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8129
8130 /* Various frobbings of labels and their addresses. */
8131
void
aarch64_start_line_hook (void)
{
  /* Forget any label seen on the previous line; a new line means any
     label-alignment bookkeeping must start afresh.  */
  last_label_seen = NULL;
}
8137
/* Record SYM as the most recently seen label (so md_assemble can align
   it to the next instruction) and emit DWARF debug info for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8145
/* Section-change hook: close any instruction sequence still open in the
   outgoing section.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8152
8153 int
8154 aarch64_data_in_code (void)
8155 {
8156 if (startswith (input_line_pointer + 1, "data:"))
8157 {
8158 *input_line_pointer = '/';
8159 input_line_pointer += 5;
8160 *input_line_pointer = 0;
8161 return 1;
8162 }
8163
8164 return 0;
8165 }
8166
/* Canonicalize NAME by stripping a trailing "/data" marker in place.
   Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5)
    {
      char *suffix = name + len - 5;
      if (strcmp (suffix, "/data") == 0)
	*suffix = 0;
    }

  return name;
}
8177 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF makes a canonical entry; REGDEF_ALIAS marks the entry as a
   secondary name for the same register number (e.g. fp for x29).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
/* Registers 0-15, e.g. SVE predicates and SME ZA tiles.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0-15 with a suffix, e.g. ZA tile slices za0h..za15h.  */
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
/* Registers 0-30: number 31 is special (SP/ZR) for the integer sets.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Registers 0-31.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
8262
/* Temporary single-letter helpers: an uppercase flag letter means the
   corresponding NZCV bit is set, lowercase means clear.  B packs the
   four bits into the 4-bit NZCV immediate (N is the MSB).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the NZCV operand (e.g. for CCMP/CCMN), mapped to
   their 4-bit immediate values.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8300 \f
8301 /* MD interface: bits in the object file. */
8302
8303 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8304 for use in the a.out file, and stores them in the array pointed to by buf.
8305 This knows about the endian-ness of the target machine and does
8306 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8307 2 (short) and 4 (long) Floating numbers are put out as a series of
8308 LITTLENUMS (shorts, here at least). */
8309
8310 void
8311 md_number_to_chars (char *buf, valueT val, int n)
8312 {
8313 if (target_big_endian)
8314 number_to_chars_bigendian (buf, val, n);
8315 else
8316 number_to_chars_littleendian (buf, val, n);
8317 }
8318
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is exactly one 32-bit word.  */
  fragp->fr_var = 4;
  return 4;
}
8330
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding is required for AArch64; return SIZE unchanged.  */
  return size;
}
8338
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed between this frag's fixed part and the next
     frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Zero-fill up to the next 4-byte boundary; no instruction can start
     at a misaligned address, so mark the filler as data.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8396
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping state.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($d/$x) implied by the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8442
/* Whether SFrame stack trace info is supported.  */

bool
aarch64_support_sframe_p (void)
{
  /* At this time, SFrame is supported for aarch64 only.  */
  return (aarch64_abi == AARCH64_ABI_LP64);
}
8451
/* Specify if RA tracking is needed.  AArch64 always tracks the return
   address register for SFrame.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  return true;
}
8459
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  Since RA tracking is
   always on for AArch64, return the "invalid" marker.  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8468
/* Get the abi/arch identifier for SFrame.  */
8470
8471 unsigned char
8472 aarch64_sframe_get_abi_arch (void)
8473 {
8474 unsigned char sframe_abi_arch = 0;
8475
8476 if (aarch64_support_sframe_p ())
8477 {
8478 sframe_abi_arch = target_big_endian
8479 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8480 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8481 }
8482
8483 return sframe_abi_arch;
8484 }
8485
8486 #endif /* OBJ_ELF */
8487 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is SP + 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8495
8496 /* Convert REGNAME to a DWARF-2 register number. */
8497
8498 int
8499 tc_aarch64_regname_to_dw2regnum (char *regname)
8500 {
8501 const reg_entry *reg = parse_reg (&regname);
8502 if (reg == NULL)
8503 return -1;
8504
8505 switch (reg->type)
8506 {
8507 case REG_TYPE_SP_32:
8508 case REG_TYPE_SP_64:
8509 case REG_TYPE_R_32:
8510 case REG_TYPE_R_64:
8511 return reg->number;
8512
8513 case REG_TYPE_FP_B:
8514 case REG_TYPE_FP_H:
8515 case REG_TYPE_FP_S:
8516 case REG_TYPE_FP_D:
8517 case REG_TYPE_FP_Q:
8518 return reg->number + 64;
8519
8520 default:
8521 break;
8522 }
8523 return -1;
8524 }
8525
8526 /* Implement DWARF2_ADDR_SIZE. */
8527
8528 int
8529 aarch64_dwarf2_addr_size (void)
8530 {
8531 if (ilp32_p)
8532 return 4;
8533 else if (llp64_p)
8534 return 8;
8535 return bfd_arch_bits_per_address (stdoutput) / 8;
8536 }
8537
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8560
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap first-character checks before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* The GOT symbol must not already exist; create it lazily.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8586
8587 /* Return non-zero if the indicated VALUE has overflowed the maximum
8588 range expressible by a unsigned number with the indicated number of
8589 BITS. */
8590
8591 static bool
8592 unsigned_overflow (valueT value, unsigned bits)
8593 {
8594 valueT lim;
8595 if (bits >= sizeof (valueT) * 8)
8596 return false;
8597 lim = (valueT) 1 << bits;
8598 return (value >= lim);
8599 }
8600
8601
8602 /* Return non-zero if the indicated VALUE has overflowed the maximum
8603 range expressible by an signed number with the indicated number of
8604 BITS. */
8605
8606 static bool
8607 signed_overflow (offsetT value, unsigned bits)
8608 {
8609 offsetT lim;
8610 if (bits >= sizeof (offsetT) * 8)
8611 return false;
8612 lim = (offsetT) 1 << (bits - 1);
8613 return (value < -lim || value >= lim);
8614 }
8615
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; any
     opcode without a counterpart yields OP_NIL below.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the operands
     do not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8678
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8739
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate of an exception-generating or UDF instruction.
	 For AARCH64_OPND_EXCEPTION the immediate is positioned by
	 encode_svc_imm; for UDF it occupies the low bits directly.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12       Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12       Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12       Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12       Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are all zero and the value fits in
	     24 bits overall.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-run the full encoder with the now-known
	 immediate, since logical-immediate validity can't be checked by
	 simple range tests.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled offset did not fit; fall back to the unscaled
	     (LDUR/STUR-style) form when available.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8918
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being fixed up inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* LDR (literal): 19-bit word-scaled PC-relative offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* ADR: 21-bit byte-granular PC-relative offset.  */
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B.cond / CBZ-style: 19-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* TBZ/TBNZ: 14-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B / BL: 26-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW family: SCALE selects which 16-bit group of the value is
       inserted (G0 = bits 0-15, G1 = 16-31, G2 = 32-47, G3 = 48-63).  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      /* Shift the selected 16-bit group down into the imm16
		 field position.  */
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Resolve the generic reloc to the ABI-specific (ILP32/LP64)
	 variant now that the ABI is known.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently there are a very limited number of fix-up types that
     actually use this field, so the impact on performance should be
     minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9311
9312 /* Translate internal representation of relocation info to BFD target
9313 format. */
9314
9315 arelent *
9316 tc_gen_reloc (asection * section, fixS * fixp)
9317 {
9318 arelent *reloc;
9319 bfd_reloc_code_real_type code;
9320
9321 reloc = XNEW (arelent);
9322
9323 reloc->sym_ptr_ptr = XNEW (asymbol *);
9324 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9325 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9326
9327 if (fixp->fx_pcrel)
9328 {
9329 if (section->use_rela_p)
9330 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9331 else
9332 fixp->fx_offset = reloc->address;
9333 }
9334 reloc->addend = fixp->fx_offset;
9335
9336 code = fixp->fx_r_type;
9337 switch (code)
9338 {
9339 case BFD_RELOC_16:
9340 if (fixp->fx_pcrel)
9341 code = BFD_RELOC_16_PCREL;
9342 break;
9343
9344 case BFD_RELOC_32:
9345 if (fixp->fx_pcrel)
9346 code = BFD_RELOC_32_PCREL;
9347 break;
9348
9349 case BFD_RELOC_64:
9350 if (fixp->fx_pcrel)
9351 code = BFD_RELOC_64_PCREL;
9352 break;
9353
9354 default:
9355 break;
9356 }
9357
9358 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9359 if (reloc->howto == NULL)
9360 {
9361 as_bad_where (fixp->fx_file, fixp->fx_line,
9362 _
9363 ("cannot represent %s relocation in this object file format"),
9364 bfd_get_reloc_code_name (code));
9365 return NULL;
9366 }
9367
9368 return reloc;
9369 }
9370
/* This fix_new is called by cons via TC_CONS_FIX_NEW.

   Create a fixup for a SIZE-byte data value described by EXP at offset
   WHERE in FRAG, choosing the BFD reloc type from the data size (and,
   for PE targets, from the O_secrel/O_secidx operators).  */

void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data directives are never pc-relative here.  */
  int pcrel = 0;

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      /* Section-relative offset (PE debug info).  */
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      /* Section index (PE debug info).  */
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9420
9421 /* Implement md_after_parse_args. This is the earliest time we need to decide
9422 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9423
9424 void
9425 aarch64_after_parse_args (void)
9426 {
9427 if (aarch64_abi != AARCH64_ABI_NONE)
9428 return;
9429
9430 #ifdef OBJ_ELF
9431 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9432 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9433 aarch64_abi = AARCH64_ABI_ILP32;
9434 else
9435 aarch64_abi = AARCH64_ABI_LP64;
9436 #else
9437 aarch64_abi = AARCH64_ABI_LLP64;
9438 #endif
9439 }
9440
9441 #ifdef OBJ_ELF
9442 const char *
9443 elf64_aarch64_target_format (void)
9444 {
9445 #ifdef TE_CLOUDABI
9446 /* FIXME: What to do for ilp32_p ? */
9447 if (target_big_endian)
9448 return "elf64-bigaarch64-cloudabi";
9449 else
9450 return "elf64-littleaarch64-cloudabi";
9451 #else
9452 if (target_big_endian)
9453 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9454 else
9455 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9456 #endif
9457 }
9458
/* tc hook: per-symbol processing before the symbol table is written;
   simply defers to the generic ELF implementation.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9464 #elif defined OBJ_COFF
/* Return the BFD target name for COFF/PE output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9470 #endif
9471
9472 /* MD interface: Finalization. */
9473
9474 /* A good place to do this, although this was probably not intended
9475 for this kind of use. We need to dump the literal pool before
9476 references are made to a null symbol pointer. */
9477
9478 void
9479 aarch64_cleanup (void)
9480 {
9481 literal_pool *pool;
9482
9483 for (pool = list_of_pools; pool; pool = pool->next)
9484 {
9485 /* Put it at the end of the relevant section. */
9486 subseg_set (pool->section, pool->sub_section);
9487 s_ltorg (0);
9488 }
9489 }
9490
9491 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to prune in sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 decide whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9555 #endif
9556
/* Adjust the symbol table.  Called late in assembly; prunes redundant
   mapping symbols and then performs the generic ELF adjustments.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9569
/* Insert KEY -> VALUE into TABLE.  The final 0 is str_hash_insert's
   "replace" argument — presumably an existing entry is kept rather than
   overwritten; verify against hash.h if this matters.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9575
/* Like checked_hash_insert, but for system-register tables: assert the
   name fits within AARCH64_MAX_SYSREG_NAME_LEN first.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9582
9583 static void
9584 fill_instruction_hash_table (void)
9585 {
9586 const aarch64_opcode *opcode = aarch64_opcode_table;
9587
9588 while (opcode->name != NULL)
9589 {
9590 templates *templ, *new_templ;
9591 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9592
9593 new_templ = XNEW (templates);
9594 new_templ->opcode = opcode;
9595 new_templ->next = NULL;
9596
9597 if (!templ)
9598 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9599 else
9600 {
9601 new_templ->next = templ->next;
9602 templ->next = new_templ;
9603 }
9604 ++opcode;
9605 }
9606 }
9607
/* Copy at most NUM characters of the NUL-terminated string SRC into DST,
   upper-casing each one, and NUL-terminate the result.  DST must have
   room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  unsigned int copied = 0;
  while (copied < num && *src != '\0')
    {
      *dst++ = TOUPPER (*src++);
      ++copied;
    }
  *dst = '\0';
}
9616
9617 /* Assume STR point to a lower-case string, allocate, convert and return
9618 the corresponding upper-case string. */
9619 static inline const char*
9620 get_upper_str (const char *str)
9621 {
9622 char *ret;
9623 size_t len = strlen (str);
9624 ret = XNEWVEC (char, len + 1);
9625 convert_to_upper (ret, str, len);
9626 return ret;
9627 }
9628
/* MD interface: Initialization.  */

/* Called once by GAS at startup: build the hash tables used during
   parsing (opcodes, conditions, shifts, system registers, etc.),
   resolve the CPU variant from the command-line options, and record
   the architecture/machine in the output BFD.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* All the opcodes-side tables below are NULL-name terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9800
/* Command line processing.  */

/* Short options: only "-m<arg>" is handled here; the sub-options are
   decoded via aarch64_opts / aarch64_long_opts in md_parse_option.  */
const char *md_shortopts = "m:";

/* Endianness selection.  A bi-endian assembler accepts both -EB and -EL;
   a single-endianness configuration only defines the matching one.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9827
/* Simple on/off options of the form -m<name> that set an int variable.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9850
/* An entry in the -mcpu= table: CPU name, the feature set it implies,
   and an optional canonical display name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
		  "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
		  "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
	   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
		"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
	     "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
		  "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
		  "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
		  "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
	      "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
	      "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
	       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
	     "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
		"Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
		"Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  /* Terminator; lookup loops stop at a NULL name.  */
  {NULL, AARCH64_ARCH_NONE, NULL}
};
10019
/* An entry in the -march= table: architecture name and its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  /* Terminator; lookup loops stop at a NULL name.  */
  {NULL, AARCH64_ARCH_NONE}
};
10046
/* ISA extensions.  Each entry gives the name accepted after "+" (or
   "+no"), the feature bits that name enables, and the features it
   depends on (used to compute transitive closures when enabling or
   disabling an extension).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  /* Terminator; lookup loops stop at a NULL name.  */
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10155
/* Long options with a "sub-option" argument, e.g. -mcpu=cortex-a53.
   OPTION includes the trailing '='; md_parse_option strips the matched
   prefix before calling FUNC with the remainder.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10163
10164 /* Transitive closure of features depending on set. */
10165 static aarch64_feature_set
10166 aarch64_feature_disable_set (aarch64_feature_set set)
10167 {
10168 const struct aarch64_option_cpu_value_table *opt;
10169 aarch64_feature_set prev = 0;
10170
10171 while (prev != set) {
10172 prev = set;
10173 for (opt = aarch64_features; opt->name != NULL; opt++)
10174 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10175 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10176 }
10177 return set;
10178 }
10179
10180 /* Transitive closure of dependencies of set. */
10181 static aarch64_feature_set
10182 aarch64_feature_enable_set (aarch64_feature_set set)
10183 {
10184 const struct aarch64_option_cpu_value_table *opt;
10185 aarch64_feature_set prev = 0;
10186
10187 while (prev != set) {
10188 prev = set;
10189 for (opt = aarch64_features; opt->name != NULL; opt++)
10190 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10191 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10192 }
10193 return set;
10194 }
10195
10196 static int
10197 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10198 bool ext_only)
10199 {
10200 /* We insist on extensions being added before being removed. We achieve
10201 this by using the ADDING_VALUE variable to indicate whether we are
10202 adding an extension (1) or removing it (0) and only allowing it to
10203 change in the order -1 -> 1 -> 0. */
10204 int adding_value = -1;
10205 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10206
10207 /* Copy the feature set, so that we can modify it. */
10208 *ext_set = **opt_p;
10209 *opt_p = ext_set;
10210
10211 while (str != NULL && *str != 0)
10212 {
10213 const struct aarch64_option_cpu_value_table *opt;
10214 const char *ext = NULL;
10215 int optlen;
10216
10217 if (!ext_only)
10218 {
10219 if (*str != '+')
10220 {
10221 as_bad (_("invalid architectural extension"));
10222 return 0;
10223 }
10224
10225 ext = strchr (++str, '+');
10226 }
10227
10228 if (ext != NULL)
10229 optlen = ext - str;
10230 else
10231 optlen = strlen (str);
10232
10233 if (optlen >= 2 && startswith (str, "no"))
10234 {
10235 if (adding_value != 0)
10236 adding_value = 0;
10237 optlen -= 2;
10238 str += 2;
10239 }
10240 else if (optlen > 0)
10241 {
10242 if (adding_value == -1)
10243 adding_value = 1;
10244 else if (adding_value != 1)
10245 {
10246 as_bad (_("must specify extensions to add before specifying "
10247 "those to remove"));
10248 return false;
10249 }
10250 }
10251
10252 if (optlen == 0)
10253 {
10254 as_bad (_("missing architectural extension"));
10255 return 0;
10256 }
10257
10258 gas_assert (adding_value != -1);
10259
10260 for (opt = aarch64_features; opt->name != NULL; opt++)
10261 if (strncmp (opt->name, str, optlen) == 0)
10262 {
10263 aarch64_feature_set set;
10264
10265 /* Add or remove the extension. */
10266 if (adding_value)
10267 {
10268 set = aarch64_feature_enable_set (opt->value);
10269 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10270 }
10271 else
10272 {
10273 set = aarch64_feature_disable_set (opt->value);
10274 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10275 }
10276 break;
10277 }
10278
10279 if (opt->name == NULL)
10280 {
10281 as_bad (_("unknown architectural extension `%s'"), str);
10282 return 0;
10283 }
10284
10285 str = ext;
10286 };
10287
10288 return 1;
10289 }
10290
10291 static int
10292 aarch64_parse_cpu (const char *str)
10293 {
10294 const struct aarch64_cpu_option_table *opt;
10295 const char *ext = strchr (str, '+');
10296 size_t optlen;
10297
10298 if (ext != NULL)
10299 optlen = ext - str;
10300 else
10301 optlen = strlen (str);
10302
10303 if (optlen == 0)
10304 {
10305 as_bad (_("missing cpu name `%s'"), str);
10306 return 0;
10307 }
10308
10309 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10310 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10311 {
10312 mcpu_cpu_opt = &opt->value;
10313 if (ext != NULL)
10314 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10315
10316 return 1;
10317 }
10318
10319 as_bad (_("unknown cpu `%s'"), str);
10320 return 0;
10321 }
10322
10323 static int
10324 aarch64_parse_arch (const char *str)
10325 {
10326 const struct aarch64_arch_option_table *opt;
10327 const char *ext = strchr (str, '+');
10328 size_t optlen;
10329
10330 if (ext != NULL)
10331 optlen = ext - str;
10332 else
10333 optlen = strlen (str);
10334
10335 if (optlen == 0)
10336 {
10337 as_bad (_("missing architecture name `%s'"), str);
10338 return 0;
10339 }
10340
10341 for (opt = aarch64_archs; opt->name != NULL; opt++)
10342 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10343 {
10344 march_cpu_opt = &opt->value;
10345 if (ext != NULL)
10346 return aarch64_parse_features (ext, &march_cpu_opt, false);
10347
10348 return 1;
10349 }
10350
10351 as_bad (_("unknown architecture `%s'\n"), str);
10352 return 0;
10353 }
10354
/* ABIs.  Table of names accepted by -mabi=; which entries exist depends
   on the object format this assembler was built for.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10370
10371 static int
10372 aarch64_parse_abi (const char *str)
10373 {
10374 unsigned int i;
10375
10376 if (str[0] == '\0')
10377 {
10378 as_bad (_("missing abi name `%s'"), str);
10379 return 0;
10380 }
10381
10382 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10383 if (strcmp (str, aarch64_abis[i].name) == 0)
10384 {
10385 aarch64_abi = aarch64_abis[i].value;
10386 return 1;
10387 }
10388
10389 as_bad (_("unknown abi `%s'\n"), str);
10390 return 0;
10391 }
10392
/* The recognized -m<name>=<subopt> options and their sub-option
   parsers.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10402
/* Handle a command-line option.  C is the option character (or OPTION_*
   code for long-only options) and ARG its argument, if any.  Return 1
   if the option was recognized, 0 otherwise.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple on/off options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the long options such as -mcpu=...  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after the '='.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10469
10470 void
10471 md_show_usage (FILE * fp)
10472 {
10473 struct aarch64_option_table *opt;
10474 struct aarch64_long_option_table *lopt;
10475
10476 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10477
10478 for (opt = aarch64_opts; opt->option != NULL; opt++)
10479 if (opt->help != NULL)
10480 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10481
10482 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10483 if (lopt->help != NULL)
10484 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10485
10486 #ifdef OPTION_EB
10487 fprintf (fp, _("\
10488 -EB assemble code for a big-endian cpu\n"));
10489 #endif
10490
10491 #ifdef OPTION_EL
10492 fprintf (fp, _("\
10493 -EL assemble code for a little-endian cpu\n"));
10494 #endif
10495 }
10496
10497 /* Parse a .cpu directive. */
10498
10499 static void
10500 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10501 {
10502 const struct aarch64_cpu_option_table *opt;
10503 char saved_char;
10504 char *name;
10505 char *ext;
10506 size_t optlen;
10507
10508 name = input_line_pointer;
10509 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10510 saved_char = *input_line_pointer;
10511 *input_line_pointer = 0;
10512
10513 ext = strchr (name, '+');
10514
10515 if (ext != NULL)
10516 optlen = ext - name;
10517 else
10518 optlen = strlen (name);
10519
10520 /* Skip the first "all" entry. */
10521 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10522 if (strlen (opt->name) == optlen
10523 && strncmp (name, opt->name, optlen) == 0)
10524 {
10525 mcpu_cpu_opt = &opt->value;
10526 if (ext != NULL)
10527 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10528 return;
10529
10530 cpu_variant = *mcpu_cpu_opt;
10531
10532 *input_line_pointer = saved_char;
10533 demand_empty_rest_of_line ();
10534 return;
10535 }
10536 as_bad (_("unknown cpu `%s'"), name);
10537 *input_line_pointer = saved_char;
10538 ignore_rest_of_line ();
10539 }
10540
10541
10542 /* Parse a .arch directive. */
10543
10544 static void
10545 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10546 {
10547 const struct aarch64_arch_option_table *opt;
10548 char saved_char;
10549 char *name;
10550 char *ext;
10551 size_t optlen;
10552
10553 name = input_line_pointer;
10554 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10555 saved_char = *input_line_pointer;
10556 *input_line_pointer = 0;
10557
10558 ext = strchr (name, '+');
10559
10560 if (ext != NULL)
10561 optlen = ext - name;
10562 else
10563 optlen = strlen (name);
10564
10565 /* Skip the first "all" entry. */
10566 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10567 if (strlen (opt->name) == optlen
10568 && strncmp (name, opt->name, optlen) == 0)
10569 {
10570 mcpu_cpu_opt = &opt->value;
10571 if (ext != NULL)
10572 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10573 return;
10574
10575 cpu_variant = *mcpu_cpu_opt;
10576
10577 *input_line_pointer = saved_char;
10578 demand_empty_rest_of_line ();
10579 return;
10580 }
10581
10582 as_bad (_("unknown architecture `%s'\n"), name);
10583 *input_line_pointer = saved_char;
10584 ignore_rest_of_line ();
10585 }
10586
10587 /* Parse a .arch_extension directive. */
10588
10589 static void
10590 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10591 {
10592 char saved_char;
10593 char *ext = input_line_pointer;
10594
10595 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10596 saved_char = *input_line_pointer;
10597 *input_line_pointer = 0;
10598
10599 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10600 return;
10601
10602 cpu_variant = *mcpu_cpu_opt;
10603
10604 *input_line_pointer = saved_char;
10605 demand_empty_rest_of_line ();
10606 }
10607
/* Copy symbol information.  Propagates the AArch64 per-symbol flag word
   from SRC to DEST.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10615
10616 #ifdef OBJ_ELF
10617 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10618 This is needed so AArch64 specific st_other values can be independently
10619 specified for an IFUNC resolver (that is called by the dynamic linker)
10620 and the symbol it resolves (aliased to the resolver). In particular,
10621 if a function symbol has special st_other value set via directives,
10622 then attaching an IFUNC resolver to that symbol should not override
10623 the st_other setting. Requiring the directive on the IFUNC resolver
10624 symbol would be unexpected and problematic in C code, where the two
10625 symbols appear as two independent function declarations. */
10626
10627 void
10628 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10629 {
10630 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10631 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10632 /* If size is unset, copy size from src. Because we don't track whether
10633 .size has been used, we can't differentiate .size dest, 0 from the case
10634 where dest's size is unset. */
10635 if (!destelf->size && S_GET_SIZE (dest) == 0)
10636 {
10637 if (srcelf->size)
10638 {
10639 destelf->size = XNEW (expressionS);
10640 *destelf->size = *srcelf->size;
10641 }
10642 S_SET_SIZE (dest, S_GET_SIZE (src));
10643 }
10644 }
10645 #endif