ad070cd06182944f04c55867956e6810933adba2
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame unwind info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* SME horizontal or vertical slice indicator, encoded in "V".
118 Values:
119 0 - Horizontal
120 1 - vertical
121 */
122 enum sme_hv_slice
123 {
124 HV_horizontal = 0,
125 HV_vertical = 1
126 };
127
128 /* Bits for DEFINED field in vector_type_el. */
129 #define NTA_HASTYPE 1
130 #define NTA_HASINDEX 2
131 #define NTA_HASVARWIDTH 4
132
133 struct vector_type_el
134 {
135 enum vector_el_type type;
136 unsigned char defined;
137 unsigned width;
138 int64_t index;
139 };
140
141 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
142
143 struct reloc
144 {
145 bfd_reloc_code_real_type type;
146 expressionS exp;
147 int pc_rel;
148 enum aarch64_opnd opnd;
149 uint32_t flags;
150 unsigned need_libopcodes_p : 1;
151 };
152
153 struct aarch64_instruction
154 {
155 /* libopcodes structure for instruction intermediate representation. */
156 aarch64_inst base;
157 /* Record assembly errors found during the parsing. */
158 struct
159 {
160 enum aarch64_operand_error_kind kind;
161 const char *error;
162 } parsing_error;
163 /* The condition that appears in the assembly line. */
164 int cond;
165 /* Relocation information (including the GAS internal fixup). */
166 struct reloc reloc;
167 /* Need to generate an immediate in the literal pool. */
168 unsigned gen_lit_pool : 1;
169 };
170
171 typedef struct aarch64_instruction aarch64_instruction;
172
173 static aarch64_instruction inst;
174
175 static bool parse_operands (char *, const aarch64_opcode *);
176 static bool programmer_friendly_fixup (aarch64_instruction *);
177
178 /* Diagnostics inline function utilities.
179
180 These are lightweight utilities which should only be called by parse_operands
181 and other parsers. GAS processes each assembly line by parsing it against
182 instruction template(s), in the case of multiple templates (for the same
183 mnemonic name), those templates are tried one by one until one succeeds or
184 all fail. An assembly line may fail a few templates before being
185 successfully parsed; an error saved here in most cases is not a user error
186 but an error indicating the current template is not the right template.
187 Therefore it is very important that errors can be saved at a low cost during
188 the parsing; we don't want to slow down the whole parsing by recording
189 non-user errors in detail.
190
191 Remember that the objective is to help GAS pick up the most appropriate
192 error message in the case of multiple templates, e.g. FMOV which has 8
193 templates. */
194
195 static inline void
196 clear_error (void)
197 {
198 inst.parsing_error.kind = AARCH64_OPDE_NIL;
199 inst.parsing_error.error = NULL;
200 }
201
202 static inline bool
203 error_p (void)
204 {
205 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
206 }
207
208 static inline const char *
209 get_error_message (void)
210 {
211 return inst.parsing_error.error;
212 }
213
214 static inline enum aarch64_operand_error_kind
215 get_error_kind (void)
216 {
217 return inst.parsing_error.kind;
218 }
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 inst.parsing_error.kind = kind;
224 inst.parsing_error.error = error;
225 }
226
227 static inline void
228 set_recoverable_error (const char *error)
229 {
230 set_error (AARCH64_OPDE_RECOVERABLE, error);
231 }
232
233 /* Use the DESC field of the corresponding aarch64_operand entry to compose
234 the error message. */
235 static inline void
236 set_default_error (void)
237 {
238 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
239 }
240
241 static inline void
242 set_syntax_error (const char *error)
243 {
244 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
245 }
246
247 static inline void
248 set_first_syntax_error (const char *error)
249 {
250 if (! error_p ())
251 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
252 }
253
254 static inline void
255 set_fatal_syntax_error (const char *error)
256 {
257 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
258 }
259 \f
260 /* Return value for certain parsers when the parsing fails; those parsers
261 return the information of the parsed result, e.g. register number, on
262 success. */
263 #define PARSE_FAIL -1
264
265 /* This is an invalid condition code that means no conditional field is
266 present. */
267 #define COND_ALWAYS 0x10
268
269 typedef struct
270 {
271 const char *template;
272 uint32_t value;
273 } asm_nzcv;
274
275 struct reloc_entry
276 {
277 char *name;
278 bfd_reloc_code_real_type reloc;
279 };
280
281 /* Macros to define the register types and masks for the purpose
282 of parsing. */
283
284 #undef AARCH64_REG_TYPES
285 #define AARCH64_REG_TYPES \
286 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
287 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
288 BASIC_REG_TYPE(SP_32) /* wsp */ \
289 BASIC_REG_TYPE(SP_64) /* sp */ \
290 BASIC_REG_TYPE(Z_32) /* wzr */ \
291 BASIC_REG_TYPE(Z_64) /* xzr */ \
292 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
293 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
294 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
295 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
296 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
297 BASIC_REG_TYPE(VN) /* v[0-31] */ \
298 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
299 BASIC_REG_TYPE(PN) /* p[0-15] */ \
300 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
301 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
302 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
303 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
304 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
305 /* Typecheck: same, plus SVE registers. */ \
306 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
307 | REG_TYPE(ZN)) \
308 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
309 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
310 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
311 /* Typecheck: same, plus SVE registers. */ \
312 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
314 | REG_TYPE(ZN)) \
315 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
316 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
317 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
318 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
319 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
320 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
322 /* Typecheck: any [BHSDQ]P FP. */ \
323 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
325 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
326 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
328 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
329 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
330 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
331 be used for SVE instructions, since Zn and Pn are valid symbols \
332 in other contexts. */ \
333 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
334 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
335 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
336 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
337 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
338 | REG_TYPE(ZN) | REG_TYPE(PN)) \
339 /* Any integer register; used for error messages only. */ \
340 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
341 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
342 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
343 /* Pseudo type to mark the end of the enumerator sequence. */ \
344 BASIC_REG_TYPE(MAX)
345
346 #undef BASIC_REG_TYPE
347 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
348 #undef MULTI_REG_TYPE
349 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
350
351 /* Register type enumerators. */
352 typedef enum aarch64_reg_type_
353 {
354 /* A list of REG_TYPE_*. */
355 AARCH64_REG_TYPES
356 } aarch64_reg_type;
357
358 #undef BASIC_REG_TYPE
359 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
360 #undef REG_TYPE
361 #define REG_TYPE(T) (1 << REG_TYPE_##T)
362 #undef MULTI_REG_TYPE
363 #define MULTI_REG_TYPE(T,V) V,
364
365 /* Structure for a hash table entry for a register. */
366 typedef struct
367 {
368 const char *name;
369 unsigned char number;
370 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
371 unsigned char builtin;
372 } reg_entry;
373
374 /* Values indexed by aarch64_reg_type to assist the type checking. */
375 static const unsigned reg_type_masks[] =
376 {
377 AARCH64_REG_TYPES
378 };
379
380 #undef BASIC_REG_TYPE
381 #undef REG_TYPE
382 #undef MULTI_REG_TYPE
383 #undef AARCH64_REG_TYPES
384
385 /* Diagnostics used when we don't get a register of the expected type.
386 Note: this has to synchronized with aarch64_reg_type definitions
387 above. */
388 static const char *
389 get_reg_expected_msg (aarch64_reg_type reg_type)
390 {
391 const char *msg;
392
393 switch (reg_type)
394 {
395 case REG_TYPE_R_32:
396 msg = N_("integer 32-bit register expected");
397 break;
398 case REG_TYPE_R_64:
399 msg = N_("integer 64-bit register expected");
400 break;
401 case REG_TYPE_R_N:
402 msg = N_("integer register expected");
403 break;
404 case REG_TYPE_R64_SP:
405 msg = N_("64-bit integer or SP register expected");
406 break;
407 case REG_TYPE_SVE_BASE:
408 msg = N_("base register expected");
409 break;
410 case REG_TYPE_R_Z:
411 msg = N_("integer or zero register expected");
412 break;
413 case REG_TYPE_SVE_OFFSET:
414 msg = N_("offset register expected");
415 break;
416 case REG_TYPE_R_SP:
417 msg = N_("integer or SP register expected");
418 break;
419 case REG_TYPE_R_Z_SP:
420 msg = N_("integer, zero or SP register expected");
421 break;
422 case REG_TYPE_FP_B:
423 msg = N_("8-bit SIMD scalar register expected");
424 break;
425 case REG_TYPE_FP_H:
426 msg = N_("16-bit SIMD scalar or floating-point half precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_S:
430 msg = N_("32-bit SIMD scalar or floating-point single precision "
431 "register expected");
432 break;
433 case REG_TYPE_FP_D:
434 msg = N_("64-bit SIMD scalar or floating-point double precision "
435 "register expected");
436 break;
437 case REG_TYPE_FP_Q:
438 msg = N_("128-bit SIMD scalar or floating-point quad precision "
439 "register expected");
440 break;
441 case REG_TYPE_R_Z_BHSDQ_V:
442 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
443 msg = N_("register expected");
444 break;
445 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
446 msg = N_("SIMD scalar or floating-point register expected");
447 break;
448 case REG_TYPE_VN: /* any V reg */
449 msg = N_("vector register expected");
450 break;
451 case REG_TYPE_ZN:
452 msg = N_("SVE vector register expected");
453 break;
454 case REG_TYPE_PN:
455 msg = N_("SVE predicate register expected");
456 break;
457 default:
458 as_fatal (_("invalid register type %d"), reg_type);
459 }
460 return msg;
461 }
462
463 /* Some well known registers that we refer to directly elsewhere. */
464 #define REG_SP 31
465 #define REG_ZR 31
466
467 /* Instructions take 4 bytes in the object file. */
468 #define INSN_SIZE 4
469
470 static htab_t aarch64_ops_hsh;
471 static htab_t aarch64_cond_hsh;
472 static htab_t aarch64_shift_hsh;
473 static htab_t aarch64_sys_regs_hsh;
474 static htab_t aarch64_pstatefield_hsh;
475 static htab_t aarch64_sys_regs_ic_hsh;
476 static htab_t aarch64_sys_regs_dc_hsh;
477 static htab_t aarch64_sys_regs_at_hsh;
478 static htab_t aarch64_sys_regs_tlbi_hsh;
479 static htab_t aarch64_sys_regs_sr_hsh;
480 static htab_t aarch64_reg_hsh;
481 static htab_t aarch64_barrier_opt_hsh;
482 static htab_t aarch64_nzcv_hsh;
483 static htab_t aarch64_pldop_hsh;
484 static htab_t aarch64_hint_opt_hsh;
485
486 /* Stuff needed to resolve the label ambiguity
487 As:
488 ...
489 label: <insn>
490 may differ from:
491 ...
492 label:
493 <insn> */
494
495 static symbolS *last_label_seen;
496
497 /* Literal pool structure. Held on a per-section
498 and per-sub-section basis. */
499
500 #define MAX_LITERAL_POOL_SIZE 1024
501 typedef struct literal_expression
502 {
503 expressionS exp;
504 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
505 LITTLENUM_TYPE * bignum;
506 } literal_expression;
507
508 typedef struct literal_pool
509 {
510 literal_expression literals[MAX_LITERAL_POOL_SIZE];
511 unsigned int next_free_entry;
512 unsigned int id;
513 symbolS *symbol;
514 segT section;
515 subsegT sub_section;
516 int size;
517 struct literal_pool *next;
518 } literal_pool;
519
520 /* Pointer to a linked list of literal pools. */
521 static literal_pool *list_of_pools = NULL;
522 \f
523 /* Pure syntax. */
524
525 /* This array holds the chars that always start a comment. If the
526 pre-processor is disabled, these aren't very useful. */
527 const char comment_chars[] = "";
528
529 /* This array holds the chars that only start a comment at the beginning of
530 a line. If the line seems to have the form '# 123 filename'
531 .line and .file directives will appear in the pre-processed output. */
532 /* Note that input_file.c hand checks for '#' at the beginning of the
533 first line of the input file. This is because the compiler outputs
534 #NO_APP at the beginning of its output. */
535 /* Also note that comments like this one will always work. */
536 const char line_comment_chars[] = "#";
537
538 const char line_separator_chars[] = ";";
539
540 /* Chars that can be used to separate mant
541 from exp in floating point numbers. */
542 const char EXP_CHARS[] = "eE";
543
544 /* Chars that mean this number is a floating point constant. */
545 /* As in 0f12.456 */
546 /* or 0d1.2345e12 */
547
548 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
549
550 /* Prefix character that indicates the start of an immediate value. */
551 #define is_immediate_prefix(C) ((C) == '#')
552
553 /* Separator character handling. */
554
555 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
556
557 static inline bool
558 skip_past_char (char **str, char c)
559 {
560 if (**str == c)
561 {
562 (*str)++;
563 return true;
564 }
565 else
566 return false;
567 }
568
569 #define skip_past_comma(str) skip_past_char (str, ',')
570
571 /* Arithmetic expressions (possibly involving symbols). */
572
573 static bool in_aarch64_get_expression = false;
574
575 /* Third argument to aarch64_get_expression. */
576 #define GE_NO_PREFIX false
577 #define GE_OPT_PREFIX true
578
579 /* Fourth argument to aarch64_get_expression. */
580 #define ALLOW_ABSENT false
581 #define REJECT_ABSENT true
582
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.  On success *STR is
   advanced past the parsed text.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* Parse via GAS's generic expression machinery by temporarily
     redirecting input_line_pointer at our string; the flag lets
     md_operand () know the call originates here.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand is unambiguously an immediate, so a bad
	 expression there is fatal rather than merely the wrong
	 template.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
649
650 /* Turn a string in input_line_pointer into a floating point constant
651 of type TYPE, and store the appropriate bytes in *LITP. The number
652 of LITTLENUMS emitted is stored in *SIZEP. An error message is
653 returned, or NULL on OK. */
654
655 const char *
656 md_atof (int type, char *litP, int *sizeP)
657 {
658 return ieee_md_atof (type, litP, sizeP, target_big_endian);
659 }
660
661 /* We handle all bad expressions here, so that we can report the faulty
662 instruction in the error message. */
663 void
664 md_operand (expressionS * exp)
665 {
666 if (in_aarch64_get_expression)
667 exp->X_op = O_illegal;
668 }
669
670 /* Immediate values. */
671
/* Record ERROR unless a diagnostic is already pending.  Errors may be
   set multiple times during parsing or bit encoding (particularly in
   the Neon bits), and the earliest one is usually the most meaningful;
   this helper keeps later (cascading) errors from overwriting it.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
683
/* Similar to first_error, but this function accepts a printf-style
   formatted error message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single static buffer will not cause error messages for
     different instructions to pollute each other; this is because at
     the end of processing of each assembly line, error message if any
     will be collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must both succeed and fit the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
708
709 /* Register parsing. */
710
711 /* Generic register parser which is called by other specialized
712 register parsers.
713 CCP points to what should be the beginning of a register name.
714 If it is indeed a valid register name, advance CCP over it and
715 return the reg_entry structure; otherwise return NULL.
716 It does not issue diagnostics. */
717
718 static reg_entry *
719 parse_reg (char **ccp)
720 {
721 char *start = *ccp;
722 char *p;
723 reg_entry *reg;
724
725 #ifdef REGISTER_PREFIX
726 if (*start != REGISTER_PREFIX)
727 return NULL;
728 start++;
729 #endif
730
731 p = start;
732 if (!ISALPHA (*p) || !is_name_beginner (*p))
733 return NULL;
734
735 do
736 p++;
737 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
738
739 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
740
741 if (!reg)
742 return NULL;
743
744 *ccp = p;
745 return reg;
746 }
747
748 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
749 return FALSE. */
750 static bool
751 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
752 {
753 return (reg_type_masks[type] & (1 << reg->type)) != 0;
754 }
755
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit registers take the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit registers take the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE register is only accepted when REG_TYPE allows it, and
	 it must be followed by an explicit ".s" or ".d" suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Skip over the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
813
814 /* Try to parse a base or offset register. Return the register entry
815 on success, setting *QUALIFIER to the register qualifier. Return null
816 otherwise.
817
818 Note that this function does not issue any diagnostics. */
819
820 static const reg_entry *
821 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
822 {
823 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
824 }
825
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers never take an explicit element
     count; width 0 means "no count given".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  /* Map the element-size letter onto its type and size in bits.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE registers or a single-element
	 (1q) vector.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* With an explicit count, the total must be a 64- or 128-bit vector,
     or one of the special half-width forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
911
912 /* *STR contains an SVE zero/merge predication suffix. Parse it into
913 *PARSED_TYPE and point *STR at the end of the suffix. */
914
915 static bool
916 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
917 {
918 char *ptr = *str;
919
920 /* Skip '/'. */
921 gas_assert (*ptr == '/');
922 ptr++;
923 switch (TOLOWER (*ptr))
924 {
925 case 'z':
926 parsed_type->type = NT_zero;
927 break;
928 case 'm':
929 parsed_type->type = NT_merge;
930 break;
931 default:
932 if (*ptr != '\0' && *ptr != ',')
933 first_error_fmt (_("unexpected character `%c' in predication type"),
934 *ptr);
935 else
936 first_error (_("missing predication type"));
937 return false;
938 }
939 parsed_type->width = 0;
940 *str = ptr + 1;
941 return true;
942 }
943
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with "no shape, no index" defaults.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete type of the register we found.  */
  type = reg->type;

  /* Vector, SVE vector and SVE predicate registers may be followed by a
     ".<shape>" suffix; predicates may alternatively carry a "/z" or
     "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      /* NOTE(review): the error is recorded but parsing deliberately
	 continues and the register number is still returned; the
	 diagnostic is presumably collected by the caller later —
	 confirm before changing this to an early return.  */
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1079
1080 /* Parse register.
1081
1082 Return the register number on success; return PARSE_FAIL otherwise.
1083
1084 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1085 the register (e.g. NEON double or quad reg when either has been requested).
1086
1087 If this is a NEON vector register with additional type information, fill
1088 in the struct pointed to by VECTYPE (if non-NULL).
1089
1090 This parser does not handle register list. */
1091
1092 static int
1093 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1094 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1095 {
1096 struct vector_type_el atype;
1097 char *str = *ccp;
1098 int reg = parse_typed_reg (&str, type, rtype, &atype,
1099 /*in_reg_list= */ false);
1100
1101 if (reg == PARSE_FAIL)
1102 return PARSE_FAIL;
1103
1104 if (vectype)
1105 *vectype = atype;
1106
1107 *ccp = str;
1108
1109 return reg;
1110 }
1111
1112 static inline bool
1113 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1114 {
1115 return
1116 e1.type == e2.type
1117 && e1.defined == e2.defined
1118 && e1.width == e2.width && e1.index == e2.index;
1119 }
1120
1121 /* This function parses a list of vector registers of type TYPE.
1122 On success, it returns the parsed register list information in the
1123 following encoded format:
1124
1125 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1126 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1127
1128 The information of the register shape and/or index is returned in
1129 *VECTYPE.
1130
1131 It returns PARSE_FAIL if the register list is invalid.
1132
1133 The list contains one to four registers.
1134 Each register can be one of:
1135 <Vt>.<T>[<index>]
1136 <Vt>.<T>
1137 All <T> should be identical.
1138 All <index> should be identical.
1139 There are restrictions on <Vt> numbers which are checked later
1140 (by reg_list_valid_p). */
1141
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  /* A register list is always wrapped in curly braces.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set by the loop condition below when the previous
	 register was followed by '-', i.e. "Vm-Vn" shorthand.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* previous register starts the range.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* If one register carries an index, all of them must; the index
	 itself is parsed after the closing '}' below.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Ranges must be ascending: "V3-V1" is rejected.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* Remember the shape of the first register; all later ones
	     must match it exactly.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number of the (possibly one-element) range
	 into the 5-bit-per-register encoding described above.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ',' (new element) or on '-' (range); the comma operator
     records that we are entering range mode before testing for '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the shared element index, e.g. the "[1]" in "{v0.b-v3.b}[1]".  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  /* Hardware lists hold between one and four registers.  */
  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low two bits hold the register count minus one; see the format
     comment above the function.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1281
1282 /* Directives: register aliases. */
1283
1284 static reg_entry *
1285 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1286 {
1287 reg_entry *new;
1288 const char *name;
1289
1290 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1291 {
1292 if (new->builtin)
1293 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1294 str);
1295
1296 /* Only warn about a redefinition if it's not defined as the
1297 same register. */
1298 else if (new->number != number || new->type != type)
1299 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1300
1301 return NULL;
1302 }
1303
1304 name = xstrdup (str);
1305 new = XNEW (reg_entry);
1306
1307 new->name = name;
1308 new->number = number;
1309 new->type = type;
1310 new->builtin = false;
1311
1312 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1313
1314 return new;
1315 }
1316
1317 /* Look for the .req directive. This is of the form:
1318
1319 new_register_name .req existing_register_name
1320
1321 If we find one, or if it looks sufficiently like one that we want to
1322 handle any error here, return TRUE. Otherwise return FALSE. */
1323
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  /* The right-hand side must name an already-known register.  */
  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return true: the line looked like a .req, so it has been
	 consumed even though no alias was created.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the name as
	 written (strncmp != 0 means they differ).  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1396
1397 /* Should never be called, as .req goes between the alias and the
1398 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" appeared at the start of a statement;
     the valid form ("alias .req reg") is handled by
     create_register_alias instead.  */
  as_bad (_("invalid syntax for .req directive"));
}
1404
1405 /* The .unreq directive deletes an alias which was previously defined
1406 by .req. For example:
1407
1408 my_alias .req r11
1409 .unreq my_alias */
1410
1411 static void
1412 s_unreq (int a ATTRIBUTE_UNUSED)
1413 {
1414 char *name;
1415 char saved_char;
1416
1417 name = input_line_pointer;
1418 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1419 saved_char = *input_line_pointer;
1420 *input_line_pointer = 0;
1421
1422 if (!*name)
1423 as_bad (_("invalid syntax for .unreq directive"));
1424 else
1425 {
1426 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1427
1428 if (!reg)
1429 as_bad (_("unknown register alias '%s'"), name);
1430 else if (reg->builtin)
1431 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1432 name);
1433 else
1434 {
1435 char *p;
1436 char *nbuf;
1437
1438 str_hash_delete (aarch64_reg_hsh, name);
1439 free ((char *) reg->name);
1440 free (reg);
1441
1442 /* Also locate the all upper case and all lower case versions.
1443 Do not complain if we cannot find one or the other as it
1444 was probably deleted above. */
1445
1446 nbuf = strdup (name);
1447 for (p = nbuf; *p; p++)
1448 *p = TOUPPER (*p);
1449 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1450 if (reg)
1451 {
1452 str_hash_delete (aarch64_reg_hsh, nbuf);
1453 free ((char *) reg->name);
1454 free (reg);
1455 }
1456
1457 for (p = nbuf; *p; p++)
1458 *p = TOLOWER (*p);
1459 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1460 if (reg)
1461 {
1462 str_hash_delete (aarch64_reg_hsh, nbuf);
1463 free ((char *) reg->name);
1464 free (reg);
1465 }
1466
1467 free (nbuf);
1468 }
1469 }
1470
1471 *input_line_pointer = saved_char;
1472 demand_empty_rest_of_line ();
1473 }
1474
1475 /* Directives: Instruction set selection. */
1476
1477 #if defined OBJ_ELF || defined OBJ_COFF
1478 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1479 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
1482
1483 /* Create a new mapping symbol for the transition to STATE. */
1484
/* Create a new mapping symbol ($x or $d) marking the transition to
   STATE, placed at offset VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Drop the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1538
1539 /* We must sometimes convert a region marked as code to data during
1540 code alignment, if an odd number of bytes have to be padded. The
1541 code mapping symbol is pushed to an aligned address. */
1542
/* Insert a $d mapping symbol at offset VALUE in FRAG, covering BYTES
   bytes of padding, then a STATE symbol after the padding.  Used when
   alignment padding turns part of a code region into data.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If the removed symbol was also the first in this frag, clear
	 that reference too so the cached pointers stay consistent.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d at the start of the padding, then the requested state ($x)
     immediately after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1566
1567 static void mapping_state_2 (enum mstate state, int max_chars);
1568
1569 /* Set the mapping state to STATE. Only call this when about to
1570 emit some STATE bytes to the file. */
1571
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* The $d symbol covers whatever was emitted before the first
	 instruction in this section.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Record the new state and emit the symbol at the current offset.  */
  mapping_state_2 (state, 0);
}
1610
1611 /* Same as mapping_state, but MAX_CHARS bytes have already been
1612 allocated. Put the mapping symbol that far back. */
1613
1614 static void
1615 mapping_state_2 (enum mstate state, int max_chars)
1616 {
1617 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1618
1619 if (!SEG_NORMAL (now_seg))
1620 return;
1621
1622 if (mapstate == state)
1623 /* The mapping symbol has already been emitted.
1624 There is nothing else to do. */
1625 return;
1626
1627 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1628 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1629 }
1630 #else
1631 #define mapping_state(x) /* nothing */
1632 #define mapping_state_2(x, y) /* nothing */
1633 #endif
1634
1635 /* Directives: sectioning and alignment. */
1636
/* Handle the .bss directive: switch to the bss section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* bss holds data, so record a $d mapping state.  */
  mapping_state (MAP_DATA);
}
1646
/* Handle the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);	/* 2^1 = 2-byte alignment.  */

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1658
1659 /* Directives: Literal pools. */
1660
1661 static literal_pool *
1662 find_literal_pool (int size)
1663 {
1664 literal_pool *pool;
1665
1666 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1667 {
1668 if (pool->section == now_seg
1669 && pool->sub_section == now_subseg && pool->size == size)
1670 break;
1671 }
1672
1673 return pool;
1674 }
1675
/* Return the literal pool for SIZE-byte entries in the current
   (sub)section, creating one if necessary.  A (re)used pool gets a
   fresh anonymous label symbol and id if it had none.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW is expected to abort on allocation failure,
	 which would make this check defensive only — confirm before
	 relying on a NULL return.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      /* The symbol is given its real location later, by s_ltorg.  */
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1720
1721 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1722 Return TRUE on success, otherwise return FALSE. */
1723 static bool
1724 add_to_lit_pool (expressionS *exp, int size)
1725 {
1726 literal_pool *pool;
1727 unsigned int entry;
1728
1729 pool = find_or_make_literal_pool (size);
1730
1731 /* Check if this literal value is already in the pool. */
1732 for (entry = 0; entry < pool->next_free_entry; entry++)
1733 {
1734 expressionS * litexp = & pool->literals[entry].exp;
1735
1736 if ((litexp->X_op == exp->X_op)
1737 && (exp->X_op == O_constant)
1738 && (litexp->X_add_number == exp->X_add_number)
1739 && (litexp->X_unsigned == exp->X_unsigned))
1740 break;
1741
1742 if ((litexp->X_op == exp->X_op)
1743 && (exp->X_op == O_symbol)
1744 && (litexp->X_add_number == exp->X_add_number)
1745 && (litexp->X_add_symbol == exp->X_add_symbol)
1746 && (litexp->X_op_symbol == exp->X_op_symbol))
1747 break;
1748 }
1749
1750 /* Do we need to create a new entry? */
1751 if (entry == pool->next_free_entry)
1752 {
1753 if (entry >= MAX_LITERAL_POOL_SIZE)
1754 {
1755 set_syntax_error (_("literal pool overflow"));
1756 return false;
1757 }
1758
1759 pool->literals[entry].exp = *exp;
1760 pool->next_free_entry += 1;
1761 if (exp->X_op == O_big)
1762 {
1763 /* PR 16688: Bignums are held in a single global array. We must
1764 copy and preserve that value now, before it is overwritten. */
1765 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1766 exp->X_add_number);
1767 memcpy (pool->literals[entry].bignum, generic_bignum,
1768 CHARS_PER_LITTLENUM * exp->X_add_number);
1769 }
1770 else
1771 pool->literals[entry].bignum = NULL;
1772 }
1773
1774 exp->X_op = O_symbol;
1775 exp->X_add_number = ((int) entry) * size;
1776 exp->X_add_symbol = pool->symbol;
1777
1778 return true;
1779 }
1780
1781 /* Can't use symbol_new here, so have to create a symbol and then at
1782 a later date assign it a value. That's what these functions do. */
1783
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy the name into the notes obstack so the caller's buffer may be
     reused or freed.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending to a frozen symbol table would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Give the object-format and target back-ends a chance to decorate
     the new symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1831
1832
/* Handle the .ltorg/.pool directive: dump every pending literal pool
   (one per entry size) into the current section and mark them empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* One pool per entry size: 4, 8 and 16 bytes (align = 2..4).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Literals are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 in the name keeps it from colliding with user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's (previously floating) label to this spot.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The saved bignum copy is no longer needed once emitted.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1891
1892 #if defined(OBJ_ELF) || defined(OBJ_COFF)
1893 /* Forward declarations for functions below, in the MD interface
1894 section. */
1895 static struct reloc_table_entry * find_reloc_table_entry (char **);
1896
1897 /* Directives: Data. */
1898 /* N.B. the support for relocation suffix in this directive needs to be
1899 implemented properly. */
1900
/* Handle .word/.long/.xword/.dword: emit NBYTES-sized data items,
   rejecting (for now) any relocation suffix on symbolic operands.  */

static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* These directives emit data: switch the mapping state to $d.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional relocation suffix ("#:name:" or
	     ":name:") after a symbolic operand.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      /* Suffixes are recognized but not yet supported here;
		 see the note above the function.  */
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1952 #endif
1953
1954 #ifdef OBJ_ELF
1955 /* Forward declarations for functions below, in the MD interface
1956 section. */
1957 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1958
1959 /* Mark symbol that it follows a variant PCS convention. */
1960
/* Handle the .variant_pcs directive: mark the named symbol as
   following a variant procedure-call standard by setting
   STO_AARCH64_VARIANT_PCS in its st_other field.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  /* Create the symbol if it is not yet known.  */
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1981 #endif /* OBJ_ELF */
1982
1983 /* Output a 32-bit word, but mark as an instruction. */
1984
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Number of 32-bit words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian on AArch64, so byte-swap
	 the word when assembling for a big-endian data target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted instructions for line-number debug info.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2039
/* Handle the .cfi_b_key_frame directive: record in the current CFI
   FDE that return addresses are signed with the B pointer-auth key.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2047
2048 #ifdef OBJ_ELF
2049 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2050
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands inside this frag, on the ADD
     instruction that must follow the directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2063
2064 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2065
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2083
2084 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2085
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands inside this frag, on the LDR
     instruction that must follow the directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2098 #endif /* OBJ_ELF */
2099
2100 #ifdef TE_PE
/* Handle the .secrel32 directive (PE targets): emit 32-bit
   section-relative values for each comma-separated expression.  */

static void
s_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Symbolic operands become section-relative relocations.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put the terminator back into the stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2119 #endif /* TE_PE */
2120
2121 static void s_aarch64_arch (int);
2122 static void s_aarch64_cpu (int);
2123 static void s_aarch64_arch_extension (int);
2124
2125 /* This table describes all the machine specific pseudo-ops the assembler
2126 has to support. The fields are:
2127 pseudo-op name without dot
2128 function to call to execute this pseudo-op
2129 Integer arg to pass to the function. */
2130
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg"; both flush pending literals.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* The integer argument is the element size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
#endif
  /* 'h' and 'b' select the half-precision and bfloat16 formats.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2163 \f
2164
2165 /* Check whether STR points to a register name followed by a comma or the
2166 end of line; REG_TYPE indicates which register types are checked
2167 against. Return TRUE if STR is such a register name; otherwise return
2168 FALSE. The function does not intend to produce any diagnostics, but since
2169 the register parser aarch64_reg_parse, which is called by this function,
2170 does produce diagnostics, we call clear_error to clear any diagnostics
2171 that may be generated by aarch64_reg_parse.
2172 Also, the function returns FALSE directly if there is any user error
2173 present at the function entry. This prevents the existing diagnostics
2174 state from being spoiled.
2175 The function currently serves parse_constant_immediate and
2176 parse_big_immediate only. */
2177 static bool
2178 reg_name_p (char *str, aarch64_reg_type reg_type)
2179 {
2180 int reg;
2181
2182 /* Prevent the diagnostics state from being spoiled. */
2183 if (error_p ())
2184 return false;
2185
2186 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2187
2188 /* Clear the parsing error that may be set by the reg parser. */
2189 clear_error ();
2190
2191 if (reg == PARSE_FAIL)
2192 return false;
2193
2194 skip_whitespace (str);
2195 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2196 return true;
2197
2198 return false;
2199 }
2200
2201 /* Parser functions used exclusively in instruction operands. */
2202
2203 /* Parse an immediate expression which may not be constant.
2204
2205 To prevent the expression parser from pushing a register name
2206 into the symbol table as an undefined symbol, firstly a check is
2207 done to find out whether STR is a register of type REG_TYPE followed
2208 by a comma or the end of line. Return FALSE if STR is such a string. */
2209
2210 static bool
2211 parse_immediate_expression (char **str, expressionS *exp,
2212 aarch64_reg_type reg_type)
2213 {
2214 if (reg_name_p (*str, reg_type))
2215 {
2216 set_recoverable_error (_("immediate operand required"));
2217 return false;
2218 }
2219
2220 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2221
2222 if (exp->X_op == O_absent)
2223 {
2224 set_fatal_syntax_error (_("missing immediate expression"));
2225 return false;
2226 }
2227
2228 return true;
2229 }
2230
2231 /* Constant immediate-value read function for use in insn parsing.
2232 STR points to the beginning of the immediate (with the optional
2233 leading #); *VAL receives the value. REG_TYPE says which register
2234 names should be treated as registers rather than as symbolic immediates.
2235
2236 Return TRUE on success; otherwise return FALSE. */
2237
2238 static bool
2239 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2240 {
2241 expressionS exp;
2242
2243 if (! parse_immediate_expression (str, &exp, reg_type))
2244 return false;
2245
2246 if (exp.X_op != O_constant)
2247 {
2248 set_syntax_error (_("constant expression required"));
2249 return false;
2250 }
2251
2252 *val = exp.X_add_number;
2253 return true;
2254 }
2255
/* Compress the IEEE single-precision bit pattern IMM into the 8-bit
   AArch64 modified-immediate floating-point encoding: the sign bit and
   the low seven bits of the exponent/fraction field.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0]  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7]    */

  return sign | low7;
}
2262
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* An encodable value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  So the low 19 bits must be zero and each of bits
     25-29 must be the complement of bit 30.  */
  uint32_t expected
    = (((imm >> 30) & 0x1) == 0 ? 0x3e000000 : 0x40000000);

  return (imm & 0x7ffff) == 0 && (imm & 0x7e000000) == expected;
}
2295
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t top = imm >> 32;
  uint32_t bottom = (uint32_t) imm;
  uint32_t want;

  /* The 29 bits dropped by the conversion must all be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three bits below the sign must all equal ~E ('E~~~').  */
  want = (((top >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000);
  if ((top & 0x78000000) != want)
    return false;

  /* The exponent must also be representable: Eeee_eeee != 1111_1111.  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((top & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((top << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (bottom >> 29));		/* 3 S bits.  */
  return true;
}
2343
2344 /* Return true if we should treat OPERAND as a double-precision
2345 floating-point operand rather than a single-precision one. */
2346 static bool
2347 double_precision_operand_p (const aarch64_opnd_info *operand)
2348 {
2349 /* Check for unsuffixed SVE registers, which are allowed
2350 for LDR and STR but not in instructions that require an
2351 immediate. We get better error messages if we arbitrarily
2352 pick one size, parse the immediate normally, and then
2353 report the match failure in the normal way. */
2354 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2355 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2356 }
2357
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision pattern must convert to single precision
	     without losing any bits.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide for a single-precision bit pattern.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A plain register name can never be a floating-point immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal literal into single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2433
/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.

   To prevent the expression parser from pushing a register name into the
   symbol table as an undefined symbol, a check is firstly done to find
   out whether STR is a register of type REG_TYPE followed by a comma or
   the end of line.  Return FALSE if STR is such a register.  */

static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  /* The parsed expression is left in inst.reloc.exp; a non-constant
     expression is not an error here, it will be fixed up later.  */
  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);

  /* *IMM is only updated for constants; callers inspect
     inst.reloc.exp.X_op to tell the cases apart.  */
  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return true;
}
2463
2464 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2465 if NEED_LIBOPCODES is non-zero, the fixup will need
2466 assistance from the libopcodes. */
2467
2468 static inline void
2469 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2470 const aarch64_opnd_info *operand,
2471 int need_libopcodes_p)
2472 {
2473 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2474 reloc->opnd = operand->type;
2475 if (need_libopcodes_p)
2476 reloc->need_libopcodes_p = 1;
2477 };
2478
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  The marker is installed by
   aarch64_set_gas_internal_fixup.  */

static inline bool
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2487
2488 /* Assign the immediate value to the relevant field in *OPERAND if
2489 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2490 needs an internal fixup in a later stage.
2491 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2492 IMM.VALUE that may get assigned with the constant. */
2493 static inline void
2494 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2495 aarch64_opnd_info *operand,
2496 int addr_off_p,
2497 int need_libopcodes_p,
2498 int skip_p)
2499 {
2500 if (reloc->exp.X_op == O_constant)
2501 {
2502 if (addr_off_p)
2503 operand->addr.offset.imm = reloc->exp.X_add_number;
2504 else
2505 operand->imm.value = reloc->exp.X_add_number;
2506 reloc->type = BFD_RELOC_UNUSED;
2507 }
2508 else
2509 {
2510 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2511 /* Tell libopcodes to ignore this operand or not. This is helpful
2512 when one of the operands needs to be fixed up later but we need
2513 libopcodes to check the other operands. */
2514 operand->skip = skip_p;
2515 }
2516 }
2517
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier name, without colons.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  /* The BFD reloc to emit for each instruction class that may carry the
     modifier; zero when the modifier is invalid for that class.  */
  bfd_reloc_code_real_type adr_type;	/* For ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* For ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* For MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* For ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* For load/store.  */
  bfd_reloc_code_real_type ld_literal_type;	/* For LDR (literal).  */
};
2537
/* Field order in each entry: name, pc_rel, then the relocs for
   ADR, ADRP, MOVZ/MOVN/MOVK, ADD, LD/ST and LDR-literal in turn;
   0 marks an instruction class the modifier cannot be used with.  */
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0, /* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3066
3067 /* Given the address of a pointer pointing to the textual name of a
3068 relocation as may appear in assembler source, attempt to find its
3069 details in reloc_table. The pointer will be updated to the character
3070 after the trailing colon. On failure, NULL will be returned;
3071 otherwise return the reloc_table_entry. */
3072
3073 static struct reloc_table_entry *
3074 find_reloc_table_entry (char **str)
3075 {
3076 unsigned int i;
3077 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3078 {
3079 int length = strlen (reloc_table[i].name);
3080
3081 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3082 && (*str)[length] == ':')
3083 {
3084 *str += (length + 1);
3085 return &reloc_table[i];
3086 }
3087 }
3088
3089 return NULL;
3090 }
3091
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  Used by aarch64_force_relocation below.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No preference; the caller may apply the generic policy.  */
      return -1;
    }
}
3194
3195 int
3196 aarch64_force_relocation (struct fix *fixp)
3197 {
3198 int res = aarch64_force_reloc (fixp->fx_r_type);
3199
3200 if (res == -1)
3201 return generic_force_reloc (fixp);
3202 return res;
3203 }
3204
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiplier)  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3219
3220 /* Parse a <shift> operator on an AArch64 data processing instruction.
3221 Return TRUE on success; otherwise return FALSE. */
3222 static bool
3223 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3224 {
3225 const struct aarch64_name_value_pair *shift_op;
3226 enum aarch64_modifier_kind kind;
3227 expressionS exp;
3228 int exp_has_prefix;
3229 char *s = *str;
3230 char *p = s;
3231
3232 for (p = *str; ISALPHA (*p); p++)
3233 ;
3234
3235 if (p == *str)
3236 {
3237 set_syntax_error (_("shift expression expected"));
3238 return false;
3239 }
3240
3241 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3242
3243 if (shift_op == NULL)
3244 {
3245 set_syntax_error (_("shift operator expected"));
3246 return false;
3247 }
3248
3249 kind = aarch64_get_operand_modifier (shift_op);
3250
3251 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3252 {
3253 set_syntax_error (_("invalid use of 'MSL'"));
3254 return false;
3255 }
3256
3257 if (kind == AARCH64_MOD_MUL
3258 && mode != SHIFTED_MUL
3259 && mode != SHIFTED_MUL_VL)
3260 {
3261 set_syntax_error (_("invalid use of 'MUL'"));
3262 return false;
3263 }
3264
3265 switch (mode)
3266 {
3267 case SHIFTED_LOGIC_IMM:
3268 if (aarch64_extend_operator_p (kind))
3269 {
3270 set_syntax_error (_("extending shift is not permitted"));
3271 return false;
3272 }
3273 break;
3274
3275 case SHIFTED_ARITH_IMM:
3276 if (kind == AARCH64_MOD_ROR)
3277 {
3278 set_syntax_error (_("'ROR' shift is not permitted"));
3279 return false;
3280 }
3281 break;
3282
3283 case SHIFTED_LSL:
3284 if (kind != AARCH64_MOD_LSL)
3285 {
3286 set_syntax_error (_("only 'LSL' shift is permitted"));
3287 return false;
3288 }
3289 break;
3290
3291 case SHIFTED_MUL:
3292 if (kind != AARCH64_MOD_MUL)
3293 {
3294 set_syntax_error (_("only 'MUL' is permitted"));
3295 return false;
3296 }
3297 break;
3298
3299 case SHIFTED_MUL_VL:
3300 /* "MUL VL" consists of two separate tokens. Require the first
3301 token to be "MUL" and look for a following "VL". */
3302 if (kind == AARCH64_MOD_MUL)
3303 {
3304 skip_whitespace (p);
3305 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3306 {
3307 p += 2;
3308 kind = AARCH64_MOD_MUL_VL;
3309 break;
3310 }
3311 }
3312 set_syntax_error (_("only 'MUL VL' is permitted"));
3313 return false;
3314
3315 case SHIFTED_REG_OFFSET:
3316 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3317 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3318 {
3319 set_fatal_syntax_error
3320 (_("invalid shift for the register offset addressing mode"));
3321 return false;
3322 }
3323 break;
3324
3325 case SHIFTED_LSL_MSL:
3326 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3327 {
3328 set_syntax_error (_("invalid shift operator"));
3329 return false;
3330 }
3331 break;
3332
3333 default:
3334 abort ();
3335 }
3336
3337 /* Whitespace can appear here if the next thing is a bare digit. */
3338 skip_whitespace (p);
3339
3340 /* Parse shift amount. */
3341 exp_has_prefix = 0;
3342 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3343 exp.X_op = O_absent;
3344 else
3345 {
3346 if (is_immediate_prefix (*p))
3347 {
3348 p++;
3349 exp_has_prefix = 1;
3350 }
3351 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3352 }
3353 if (kind == AARCH64_MOD_MUL_VL)
3354 /* For consistency, give MUL VL the same shift amount as an implicit
3355 MUL #1. */
3356 operand->shifter.amount = 1;
3357 else if (exp.X_op == O_absent)
3358 {
3359 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3360 {
3361 set_syntax_error (_("missing shift amount"));
3362 return false;
3363 }
3364 operand->shifter.amount = 0;
3365 }
3366 else if (exp.X_op != O_constant)
3367 {
3368 set_syntax_error (_("constant shift amount required"));
3369 return false;
3370 }
3371 /* For parsing purposes, MUL #n has no inherent range. The range
3372 depends on the operand and will be checked by operand-specific
3373 routines. */
3374 else if (kind != AARCH64_MOD_MUL
3375 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3376 {
3377 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3378 return false;
3379 }
3380 else
3381 {
3382 operand->shifter.amount = exp.X_add_number;
3383 operand->shifter.amount_present = 1;
3384 }
3385
3386 operand->shifter.operator_present = 1;
3387 operand->shifter.kind = kind;
3388
3389 *str = p;
3390 return true;
3391 }
3392
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  The result is left in
     inst.reloc.exp for later fixup.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Do not accept any shifter for logical immediate values.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3434
3435 /* Parse a <shifter_operand> for a data processing instruction:
3436
3437 <Rm>
3438 <Rm>, <shift>
3439 #<immediate>
3440 #<immediate>, LSL #imm
3441
3442 where <shift> is handled by parse_shift above, and the last two
3443 cases are handled by the function above.
3444
3445 Validation of immediate operands is deferred to md_apply_fix.
3446
3447 Return TRUE on success; otherwise return FALSE. */
3448
3449 static bool
3450 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3451 enum parse_shift_mode mode)
3452 {
3453 const reg_entry *reg;
3454 aarch64_opnd_qualifier_t qualifier;
3455 enum aarch64_operand_class opd_class
3456 = aarch64_get_operand_class (operand->type);
3457
3458 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3459 if (reg)
3460 {
3461 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3462 {
3463 set_syntax_error (_("unexpected register in the immediate operand"));
3464 return false;
3465 }
3466
3467 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3468 {
3469 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3470 return false;
3471 }
3472
3473 operand->reg.regno = reg->number;
3474 operand->qualifier = qualifier;
3475
3476 /* Accept optional shift operation on register. */
3477 if (! skip_past_comma (str))
3478 return true;
3479
3480 if (! parse_shift (str, operand, mode))
3481 return false;
3482
3483 return true;
3484 }
3485 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3486 {
3487 set_syntax_error
3488 (_("integer register expected in the extended/shifted operand "
3489 "register"));
3490 return false;
3491 }
3492
3493 /* We have a shifted immediate variable. */
3494 return parse_shifter_operand_imm (str, operand, mode);
3495 }
3496
/* Parse a shifter operand that may start with a relocation modifier
   such as ":lo12:" (optionally preceded by '#').  If no modifier is
   present, defer entirely to parse_shifter_operand.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":".  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3558
3559 /* Parse all forms of an address expression. Information is written
3560 to *OPERAND and/or inst.reloc.
3561
3562 The A64 instruction set has the following addressing modes:
3563
3564 Offset
3565 [base] // in SIMD ld/st structure
3566 [base{,#0}] // in ld/st exclusive
3567 [base{,#imm}]
3568 [base,Xm{,LSL #imm}]
3569 [base,Xm,SXTX {#imm}]
3570 [base,Wm,(S|U)XTW {#imm}]
3571 Pre-indexed
3572 [base]! // in ldraa/ldrab exclusive
3573 [base,#imm]!
3574 Post-indexed
3575 [base],#imm
3576 [base],Xm // in SIMD ld/st structure
3577 PC-relative (literal)
3578 label
3579 SVE:
3580 [base,#imm,MUL VL]
3581 [base,Zm.D{,LSL #imm}]
3582 [base,Zm.S,(S|U)XTW {#imm}]
3583 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3584 [Zn.S,#imm]
3585 [Zn.D,#imm]
3586 [Zn.S{, Xm}]
3587 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3588 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3589 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3590
3591 (As a convenience, the notation "=immediate" is permitted in conjunction
3592 with the pc-relative literal load instructions to automatically place an
3593 immediate value or symbolic address in a nearby literal pool and generate
3594 a hidden label which references it.)
3595
3596 Upon a successful parsing, the address structure in *OPERAND will be
3597 filled in the following way:
3598
3599 .base_regno = <base>
3600 .offset.is_reg // 1 if the offset is a register
3601 .offset.imm = <imm>
3602 .offset.regno = <Rm>
3603
3604 For different addressing modes defined in the A64 ISA:
3605
3606 Offset
3607 .pcrel=0; .preind=1; .postind=0; .writeback=0
3608 Pre-indexed
3609 .pcrel=0; .preind=1; .postind=0; .writeback=1
3610 Post-indexed
3611 .pcrel=0; .preind=0; .postind=1; .writeback=1
3612 PC-relative (literal)
3613 .pcrel=1; .preind=1; .postind=0; .writeback=0
3614
3615 The shift/extension information, if any, will be stored in .shifter.
3616 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3617 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3618 corresponding register.
3619
3620 BASE_TYPE says which types of base register should be accepted and
3621 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3622 is the type of shifter that is allowed for immediate offsets,
3623 or SHIFTED_NONE if none.
3624
3625 In all other respects, it is the caller's responsibility to check
3626 for addressing modes not supported by the instruction, and to set
3627 inst.reloc.type. */
3628
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  /* Start with no qualifiers; they are filled in as registers are
     parsed below.  */
  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR takes the adr_type variant; everything else here is a
	     PC-relative literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register and its qualifier.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset must have the same element size, except
		 for the SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* The remaining shifters ((S|U)XTW) take a 32-bit offset.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* Post-index offset: either a 64-bit register or an expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Synthesize the implicit #0 offset.  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3927
3928 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3929 on success. */
3930 static bool
3931 parse_address (char **str, aarch64_opnd_info *operand)
3932 {
3933 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3934 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3935 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3936 }
3937
3938 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3939 The arguments have the same meaning as for parse_address_main.
3940 Return TRUE on success. */
3941 static bool
3942 parse_sve_address (char **str, aarch64_opnd_info *operand,
3943 aarch64_opnd_qualifier_t *base_qualifier,
3944 aarch64_opnd_qualifier_t *offset_qualifier)
3945 {
3946 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3947 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3948 SHIFTED_MUL_VL);
3949 }
3950
3951 /* Parse a register X0-X30. The register must be 64-bit and register 31
3952 is unallocated. */
3953 static bool
3954 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3955 {
3956 const reg_entry *reg = parse_reg (str);
3957 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3958 {
3959 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3960 return false;
3961 }
3962 operand->reg.regno = reg->number;
3963 operand->qualifier = AARCH64_OPND_QLF_X;
3964 return true;
3965 }
3966
3967 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3968 Return TRUE on success; otherwise return FALSE. */
3969 static bool
3970 parse_half (char **str, int *internal_fixup_p)
3971 {
3972 char *p = *str;
3973
3974 skip_past_char (&p, '#');
3975
3976 gas_assert (internal_fixup_p);
3977 *internal_fixup_p = 0;
3978
3979 if (*p == ':')
3980 {
3981 struct reloc_table_entry *entry;
3982
3983 /* Try to parse a relocation. Anything else is an error. */
3984 ++p;
3985
3986 if (!(entry = find_reloc_table_entry (&p)))
3987 {
3988 set_syntax_error (_("unknown relocation modifier"));
3989 return false;
3990 }
3991
3992 if (entry->movw_type == 0)
3993 {
3994 set_syntax_error
3995 (_("this relocation modifier is not allowed on this instruction"));
3996 return false;
3997 }
3998
3999 inst.reloc.type = entry->movw_type;
4000 }
4001 else
4002 *internal_fixup_p = 1;
4003
4004 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4005 return false;
4006
4007 *str = p;
4008 return true;
4009 }
4010
4011 /* Parse an operand for an ADRP instruction:
4012 ADRP <Xd>, <label>
4013 Return TRUE on success; otherwise return FALSE. */
4014
4015 static bool
4016 parse_adrp (char **str)
4017 {
4018 char *p;
4019
4020 p = *str;
4021 if (*p == ':')
4022 {
4023 struct reloc_table_entry *entry;
4024
4025 /* Try to parse a relocation. Anything else is an error. */
4026 ++p;
4027 if (!(entry = find_reloc_table_entry (&p)))
4028 {
4029 set_syntax_error (_("unknown relocation modifier"));
4030 return false;
4031 }
4032
4033 if (entry->adrp_type == 0)
4034 {
4035 set_syntax_error
4036 (_("this relocation modifier is not allowed on this instruction"));
4037 return false;
4038 }
4039
4040 inst.reloc.type = entry->adrp_type;
4041 }
4042 else
4043 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4044
4045 inst.reloc.pc_rel = 1;
4046 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4047 return false;
4048 *str = p;
4049 return true;
4050 }
4051
4052 /* Miscellaneous. */
4053
4054 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4055 of SIZE tokens in which index I gives the token for field value I,
4056 or is null if field value I is invalid. REG_TYPE says which register
4057 names should be treated as registers rather than as symbolic immediates.
4058
4059 Return true on success, moving *STR past the operand and storing the
4060 field value in *VAL. */
4061
4062 static int
4063 parse_enum_string (char **str, int64_t *val, const char *const *array,
4064 size_t size, aarch64_reg_type reg_type)
4065 {
4066 expressionS exp;
4067 char *p, *q;
4068 size_t i;
4069
4070 /* Match C-like tokens. */
4071 p = q = *str;
4072 while (ISALNUM (*q))
4073 q++;
4074
4075 for (i = 0; i < size; ++i)
4076 if (array[i]
4077 && strncasecmp (array[i], p, q - p) == 0
4078 && array[i][q - p] == 0)
4079 {
4080 *val = i;
4081 *str = q;
4082 return true;
4083 }
4084
4085 if (!parse_immediate_expression (&p, &exp, reg_type))
4086 return false;
4087
4088 if (exp.X_op == O_constant
4089 && (uint64_t) exp.X_add_number < size)
4090 {
4091 *val = exp.X_add_number;
4092 *str = p;
4093 return true;
4094 }
4095
4096 /* Use the default error for this operand. */
4097 return false;
4098 }
4099
4100 /* Parse an option for a preload instruction. Returns the encoding for the
4101 option, or PARSE_FAIL. */
4102
4103 static int
4104 parse_pldop (char **str)
4105 {
4106 char *p, *q;
4107 const struct aarch64_name_value_pair *o;
4108
4109 p = q = *str;
4110 while (ISALNUM (*q))
4111 q++;
4112
4113 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4114 if (!o)
4115 return PARSE_FAIL;
4116
4117 *str = q;
4118 return o->value;
4119 }
4120
4121 /* Parse an option for a barrier instruction. Returns the encoding for the
4122 option, or PARSE_FAIL. */
4123
4124 static int
4125 parse_barrier (char **str)
4126 {
4127 char *p, *q;
4128 const struct aarch64_name_value_pair *o;
4129
4130 p = q = *str;
4131 while (ISALPHA (*q))
4132 q++;
4133
4134 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4135 if (!o)
4136 return PARSE_FAIL;
4137
4138 *str = q;
4139 return o->value;
4140 }
4141
4142 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4143 return 0 if successful. Otherwise return PARSE_FAIL. */
4144
4145 static int
4146 parse_barrier_psb (char **str,
4147 const struct aarch64_name_value_pair ** hint_opt)
4148 {
4149 char *p, *q;
4150 const struct aarch64_name_value_pair *o;
4151
4152 p = q = *str;
4153 while (ISALPHA (*q))
4154 q++;
4155
4156 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4157 if (!o)
4158 {
4159 set_fatal_syntax_error
4160 ( _("unknown or missing option to PSB/TSB"));
4161 return PARSE_FAIL;
4162 }
4163
4164 if (o->value != 0x11)
4165 {
4166 /* PSB only accepts option name 'CSYNC'. */
4167 set_syntax_error
4168 (_("the specified option is not accepted for PSB/TSB"));
4169 return PARSE_FAIL;
4170 }
4171
4172 *str = q;
4173 *hint_opt = o;
4174 return 0;
4175 }
4176
4177 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4178 return 0 if successful. Otherwise return PARSE_FAIL. */
4179
4180 static int
4181 parse_bti_operand (char **str,
4182 const struct aarch64_name_value_pair ** hint_opt)
4183 {
4184 char *p, *q;
4185 const struct aarch64_name_value_pair *o;
4186
4187 p = q = *str;
4188 while (ISALPHA (*q))
4189 q++;
4190
4191 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4192 if (!o)
4193 {
4194 set_fatal_syntax_error
4195 ( _("unknown option to BTI"));
4196 return PARSE_FAIL;
4197 }
4198
4199 switch (o->value)
4200 {
4201 /* Valid BTI operands. */
4202 case HINT_OPD_C:
4203 case HINT_OPD_J:
4204 case HINT_OPD_JC:
4205 break;
4206
4207 default:
4208 set_syntax_error
4209 (_("unknown option to BTI"));
4210 return PARSE_FAIL;
4211 }
4212
4213 *str = q;
4214 *hint_opt = o;
4215 return 0;
4216 }
4217
4218 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4219 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4220 on failure. Format:
4221
4222 REG_TYPE.QUALIFIER
4223
4224 Side effect: Update STR with current parse position of success.
4225 */
4226
4227 static const reg_entry *
4228 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4229 aarch64_opnd_qualifier_t *qualifier)
4230 {
4231 char *q;
4232
4233 reg_entry *reg = parse_reg (str);
4234 if (reg != NULL && reg->type == reg_type)
4235 {
4236 if (!skip_past_char (str, '.'))
4237 {
4238 set_syntax_error (_("missing ZA tile element size separator"));
4239 return NULL;
4240 }
4241
4242 q = *str;
4243 switch (TOLOWER (*q))
4244 {
4245 case 'b':
4246 *qualifier = AARCH64_OPND_QLF_S_B;
4247 break;
4248 case 'h':
4249 *qualifier = AARCH64_OPND_QLF_S_H;
4250 break;
4251 case 's':
4252 *qualifier = AARCH64_OPND_QLF_S_S;
4253 break;
4254 case 'd':
4255 *qualifier = AARCH64_OPND_QLF_S_D;
4256 break;
4257 case 'q':
4258 *qualifier = AARCH64_OPND_QLF_S_Q;
4259 break;
4260 default:
4261 return NULL;
4262 }
4263 q++;
4264
4265 *str = q;
4266 return reg;
4267 }
4268
4269 return NULL;
4270 }
4271
4272 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4273 Function return tile QUALIFIER on success.
4274
4275 Tiles are in example format: za[0-9]\.[bhsd]
4276
4277 Function returns <ZAda> register number or PARSE_FAIL.
4278 */
static int
parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
{
  int regno;
  const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);

  if (reg == NULL)
    return PARSE_FAIL;
  regno = reg->number;

  /* The number of addressable tiles depends on the element size:
     larger elements allow more tiles.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* For the 8-bit variant: only ZA0 exists.  */
      if (regno != 0x00)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_H:
      /* For the 16-bit variant: the ZA tile is ZA0-ZA1.  */
      if (regno > 0x01)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_S:
      if (regno > 0x03)
	{
	  /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3.  */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_D:
      if (regno > 0x07)
	{
	  /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7  */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
	  return PARSE_FAIL;
	}
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
      return PARSE_FAIL;
    }

  return regno;
}
4328
4329 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4330
4331 #<imm>
4332 <imm>
4333
4334 Function return TRUE if immediate was found, or FALSE.
4335 */
4336 static bool
4337 parse_sme_immediate (char **str, int64_t *imm)
4338 {
4339 int64_t val;
4340 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4341 return false;
4342
4343 *imm = val;
4344 return true;
4345 }
4346
4347 /* Parse index with vector select register and immediate:
4348
4349 [<Wv>, <imm>]
4350 [<Wv>, #<imm>]
4351 where <Wv> is in W12-W15 range and # is optional for immediate.
4352
4353 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4354 is set to true.
4355
4356 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4357 IMM output.
4358 */
4359 static bool
4360 parse_sme_za_hv_tiles_operand_index (char **str,
4361 int *vector_select_register,
4362 int64_t *imm)
4363 {
4364 const reg_entry *reg;
4365
4366 if (!skip_past_char (str, '['))
4367 {
4368 set_syntax_error (_("expected '['"));
4369 return false;
4370 }
4371
4372 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4373 reg = parse_reg (str);
4374 if (reg == NULL || reg->type != REG_TYPE_R_32
4375 || reg->number < 12 || reg->number > 15)
4376 {
4377 set_syntax_error (_("expected vector select register W12-W15"));
4378 return false;
4379 }
4380 *vector_select_register = reg->number;
4381
4382 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4383 {
4384 set_syntax_error (_("expected ','"));
4385 return false;
4386 }
4387
4388 if (!parse_sme_immediate (str, imm))
4389 {
4390 set_syntax_error (_("index offset immediate expected"));
4391 return false;
4392 }
4393
4394 if (!skip_past_char (str, ']'))
4395 {
4396 set_syntax_error (_("expected ']'"));
4397 return false;
4398 }
4399
4400 return true;
4401 }
4402
4403 /* Parse SME ZA horizontal or vertical vector access to tiles.
4404 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4405 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4406 contains <Wv> select register and corresponding optional IMMEDIATE.
4407 In addition QUALIFIER is extracted.
4408
4409 Field format examples:
4410
4411 ZA0<HV>.B[<Wv>, #<imm>]
4412 <ZAn><HV>.H[<Wv>, #<imm>]
4413 <ZAn><HV>.S[<Wv>, #<imm>]
4414 <ZAn><HV>.D[<Wv>, #<imm>]
4415 <ZAn><HV>.Q[<Wv>, #<imm>]
4416
4417 Function returns <ZAda> register number or PARSE_FAIL.
4418 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try both orientations from the same starting position: the
     horizontal (ZAnH) form first, then the vertical (ZAnV) form.
     Only the successful attempt's cursor is written back to *STR.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* Per-element-size limits: larger elements mean more tiles but
     fewer slice indices per tile.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4499
4500
4501 static int
4502 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4503 enum sme_hv_slice *slice_indicator,
4504 int *vector_select_register,
4505 int *imm,
4506 aarch64_opnd_qualifier_t *qualifier)
4507 {
4508 int regno;
4509
4510 if (!skip_past_char (str, '{'))
4511 {
4512 set_syntax_error (_("expected '{'"));
4513 return PARSE_FAIL;
4514 }
4515
4516 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4517 vector_select_register, imm,
4518 qualifier);
4519
4520 if (regno == PARSE_FAIL)
4521 return PARSE_FAIL;
4522
4523 if (!skip_past_char (str, '}'))
4524 {
4525 set_syntax_error (_("expected '}'"));
4526 return PARSE_FAIL;
4527 }
4528
4529 return regno;
4530 }
4531
4532 /* Parse list of up to eight 64-bit element tile names separated by commas in
4533 SME's ZERO instruction:
4534
4535 ZERO { <mask> }
4536
4537 Function returns <mask>:
4538
4539 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4540 */
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  /* Each bit of MASK selects one of the eight 64-bit element tiles
     ZA0.D-ZA7.D; wider-element tile names set the bits of all the
     64-bit tiles they cover.  */
  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* ZAn.H covers 64-bit tiles n, n+2, n+4 and n+6.  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* ZAn.S covers 64-bit tiles n and n+4.  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* ZAn.D covers the single 64-bit tile n.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4593
4594 /* Wraps in curly braces <mask> operand ZERO instruction:
4595
4596 ZERO { <mask> }
4597
4598 Function returns value of <mask> bit-field.
4599 */
4600 static int
4601 parse_sme_list_of_64bit_tiles (char **str)
4602 {
4603 int regno;
4604
4605 if (!skip_past_char (str, '{'))
4606 {
4607 set_syntax_error (_("expected '{'"));
4608 return PARSE_FAIL;
4609 }
4610
4611 /* Empty <mask> list is an all-zeros immediate. */
4612 if (!skip_past_char (str, '}'))
4613 {
4614 regno = parse_sme_zero_mask (str);
4615 if (regno == PARSE_FAIL)
4616 return PARSE_FAIL;
4617
4618 if (!skip_past_char (str, '}'))
4619 {
4620 set_syntax_error (_("expected '}'"));
4621 return PARSE_FAIL;
4622 }
4623 }
4624 else
4625 regno = 0x00;
4626
4627 return regno;
4628 }
4629
4630 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4631 Operand format:
4632
4633 ZA[<Wv>, <imm>]
4634 ZA[<Wv>, #<imm>]
4635
4636 Function returns <Wv> or PARSE_FAIL.
4637 */
4638 static int
4639 parse_sme_za_array (char **str, int *imm)
4640 {
4641 char *p, *q;
4642 int regno;
4643 int64_t imm_value;
4644
4645 p = q = *str;
4646 while (ISALPHA (*q))
4647 q++;
4648
4649 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4650 {
4651 set_syntax_error (_("expected ZA array"));
4652 return PARSE_FAIL;
4653 }
4654
4655 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4656 return PARSE_FAIL;
4657
4658 if (imm_value < 0 || imm_value > 15)
4659 {
4660 set_syntax_error (_("offset out of range"));
4661 return PARSE_FAIL;
4662 }
4663
4664 *imm = imm_value;
4665 *str = q;
4666 return regno;
4667 }
4668
/* Parse streaming mode operand for SMSTART and SMSTOP.

   {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
*/
4675 static int
4676 parse_sme_sm_za (char **str)
4677 {
4678 char *p, *q;
4679
4680 p = q = *str;
4681 while (ISALPHA (*q))
4682 q++;
4683
4684 if ((q - p != 2)
4685 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4686 {
4687 set_syntax_error (_("expected SM or ZA operand"));
4688 return PARSE_FAIL;
4689 }
4690
4691 *str = q;
4692 return TOLOWER (p[0]);
4693 }
4694
4695 /* Parse the name of the source scalable predicate register, the index base
4696 register W12-W15 and the element index. Function performs element index
4697 limit checks as well as qualifier type checks.
4698
4699 <Pn>.<T>[<Wv>, <imm>]
4700 <Pn>.<T>[<Wv>, #<imm>]
4701
4702 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4703 <imm> to IMM.
4704 Function returns <Pn>, or PARSE_FAIL.
4705 */
4706 static int
4707 parse_sme_pred_reg_with_index(char **str,
4708 int *index_base_reg,
4709 int *imm,
4710 aarch64_opnd_qualifier_t *qualifier)
4711 {
4712 int regno;
4713 int64_t imm_limit;
4714 int64_t imm_value;
4715 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4716
4717 if (reg == NULL)
4718 return PARSE_FAIL;
4719 regno = reg->number;
4720
4721 switch (*qualifier)
4722 {
4723 case AARCH64_OPND_QLF_S_B:
4724 imm_limit = 15;
4725 break;
4726 case AARCH64_OPND_QLF_S_H:
4727 imm_limit = 7;
4728 break;
4729 case AARCH64_OPND_QLF_S_S:
4730 imm_limit = 3;
4731 break;
4732 case AARCH64_OPND_QLF_S_D:
4733 imm_limit = 1;
4734 break;
4735 default:
4736 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4737 return PARSE_FAIL;
4738 }
4739
4740 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4741 return PARSE_FAIL;
4742
4743 if (imm_value < 0 || imm_value > imm_limit)
4744 {
4745 set_syntax_error (_("element index out of range for given variant"));
4746 return PARSE_FAIL;
4747 }
4748
4749 *imm = imm_value;
4750
4751 return regno;
4752 }
4753
4754 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4755 Returns the encoding for the option, or PARSE_FAIL.
4756
4757 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4758 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4759
4760 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4761 field, otherwise as a system register.
4762 */
4763
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased image of the name into BUF, stopping one short
     of the buffer end so BUF always stays NUL-terminated.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the encoding: op0[15:14] op1[13:11]
	     Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose (without failing) uses not supported by
	 the selected processor, and warn about deprecated names.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4827
4828 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4829 for the option, or NULL. */
4830
4831 static const aarch64_sys_ins_reg *
4832 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4833 {
4834 char *p, *q;
4835 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4836 const aarch64_sys_ins_reg *o;
4837
4838 p = buf;
4839 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4840 if (p < buf + (sizeof (buf) - 1))
4841 *p++ = TOLOWER (*q);
4842 *p = '\0';
4843
4844 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4845 valid system register. This is enforced by construction of the hash
4846 table. */
4847 if (p - buf != q - *str)
4848 return NULL;
4849
4850 o = str_hash_find (sys_ins_regs, buf);
4851 if (!o)
4852 return NULL;
4853
4854 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4855 o->name, o->value, o->flags, 0))
4856 as_bad (_("selected processor does not support system register "
4857 "name '%s'"), buf);
4858 if (aarch64_sys_reg_deprecated_p (o->flags))
4859 as_warn (_("system register name '%s' is deprecated and may be "
4860 "removed in a future release"), buf);
4861
4862 *str = q;
4863 return o;
4864 }
4865 \f
/* Convenience macros for parse_operands.  They operate on the caller's
   local variables (STR and, where noted, VAL, RTYPE, REG, QUALIFIER,
   INFO and IMM_REG_TYPE) and jump to the caller's local label
   "failure" when parsing fails.  */

/* Require and consume the literal character CHR.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL/RTYPE.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE and record its
   number and qualifier into the operand INFO.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL, with no range check.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and fail fatally when it lies
   outside [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic operand from the string table ARRAY into VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4917 \f
/* Encode the 12-bit immediate of an add/sub immediate instruction;
   the value occupies bits [21:10] of the word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4924
/* Encode the shift-amount field of an add/sub immediate instruction;
   the value occupies bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
4931
4932
/* Encode the 21-bit immediate of an ADR instruction: the low two bits
   go to immlo (bits [30:29]) and the remaining 19 bits to immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
4940
/* Encode the 16-bit immediate of a move-wide instruction; the value
   occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
4947
/* Encode the 26-bit offset of an unconditional branch, masking the
   value to the field width.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4954
/* Encode the 19-bit offset of a conditional branch or compare &
   branch; the field sits at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4961
/* Encode the 19-bit offset of a load-literal instruction; the field
   sits at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4968
/* Encode the 14-bit offset of a test & branch instruction; the field
   sits at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4975
/* Encode the 16-bit immediate of SVC/HVC/SMC; the field sits at
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4982
/* Toggle bit 30 to reencode add(s) as sub(s) or vice versa.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000;
}
4989
/* Set bit 30 to turn a MOVZ/MOVN-style opcode into MOVZ.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}
4995
/* Clear bit 30 to turn a MOVZ/MOVN-style opcode into MOVN.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~((uint32_t) 1 << 30);
}
5001
5002 /* Overall per-instruction processing. */
5003
5004 /* We need to be able to fix up arbitrary expressions in some statements.
5005 This is so that we can handle symbols that are an arbitrary distance from
5006 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5007 which returns part of an address in a form which will be valid for
5008 a data instruction. We do this by pushing the expression into a symbol
5009 in the expr_section, and creating a fix for that. */
5010
5011 static fixS *
5012 fix_new_aarch64 (fragS * frag,
5013 int where,
5014 short int size,
5015 expressionS * exp,
5016 int pc_rel,
5017 int reloc)
5018 {
5019 fixS *new_fix;
5020
5021 switch (exp->X_op)
5022 {
5023 case O_constant:
5024 case O_symbol:
5025 case O_add:
5026 case O_subtract:
5027 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5028 break;
5029
5030 default:
5031 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5032 pc_rel, reloc);
5033 break;
5034 }
5035 return new_fix;
5036 }
5037 \f
5038 /* Diagnostics on operands errors. */
5039
5040 /* By default, output verbose error message.
5041 Disable the verbose error message by -mno-verbose-error. */
5042 static int verbose_error_p = 1;
5043
5044 #ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Keep the entries in
   sync with enum aarch64_operand_error_kind: each string must sit at
   the index of its enumerator, otherwise the DEBUG_TRACE output that
   indexes this table by error kind prints the wrong names.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
5060 #endif /* DEBUG_AARCH64 */
5061
5062 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
5063
5064 When multiple errors of different kinds are found in the same assembly
5065 line, only the error of the highest severity will be picked up for
5066 issuing the diagnostics. */
5067
static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     increasing order of severity; these asserts document (and check at
     runtime, in asserting builds) that assumed ordering.
     NOTE(review): AARCH64_OPDE_UNTIED_IMMS/UNTIED_OPERAND are used
     elsewhere in this file but not covered by these asserts -- confirm
     their relative position in the enum.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5085
5086 /* Helper routine to get the mnemonic name from the assembly instruction
5087 line; should only be called for the diagnosis purpose, as there is
5088 string copy operation involved, which may affect the runtime
5089 performance if used in elsewhere. */
5090
static const char*
get_mnemonic_name (const char *str)
{
  /* The returned pointer refers to this static buffer, so a later call
     overwrites the previous result.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5114
5115 static void
5116 reset_aarch64_instruction (aarch64_instruction *instruction)
5117 {
5118 memset (instruction, '\0', sizeof (aarch64_instruction));
5119 instruction->reloc.type = BFD_RELOC_UNUSED;
5120 }
5121
5122 /* Data structures storing one user error in the assembly code related to
5123 operands. */
5124
/* One recorded operand error, tied to the opcode (instruction
   template) it was found against.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template being matched.  */
  aarch64_operand_error detail;		/* The error information.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records.  New records are pushed at
   HEAD, so TAIL is the oldest record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5141
5142 /* Top-level data structure reporting user errors for the current line of
5143 the assembly code.
5144 The way md_assemble works is that all opcodes sharing the same mnemonic
5145 name are iterated to find a match to the assembly line. In this data
5146 structure, each of the such opcodes will have one operand_error_record
5147 allocated and inserted. In other words, excessive errors related with
5148 a single opcode are disregarded. */
5149 operand_errors operand_error_report;
5150
5151 /* Free record nodes. */
5152 static operand_error_record *free_opnd_error_record_nodes = NULL;
5153
5154 /* Initialize the data structure that stores the operand mismatch
5155 information on assembling one line of the assembly code. */
5156 static void
5157 init_operand_error_report (void)
5158 {
5159 if (operand_error_report.head != NULL)
5160 {
5161 gas_assert (operand_error_report.tail != NULL);
5162 operand_error_report.tail->next = free_opnd_error_record_nodes;
5163 free_opnd_error_record_nodes = operand_error_report.head;
5164 operand_error_report.head = NULL;
5165 operand_error_report.tail = NULL;
5166 return;
5167 }
5168 gas_assert (operand_error_report.tail == NULL);
5169 }
5170
5171 /* Return TRUE if some operand error has been recorded during the
5172 parsing of the current assembly line using the opcode *OPCODE;
5173 otherwise return FALSE. */
5174 static inline bool
5175 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5176 {
5177 operand_error_record *record = operand_error_report.head;
5178 return record && record->opcode == opcode;
5179 }
5180
5181 /* Add the error record *NEW_RECORD to operand_error_report. The record's
5182 OPCODE field is initialized with OPCODE.
5183 N.B. only one record for each opcode, i.e. the maximum of one error is
5184 recorded for each instruction template. */
5185
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record for OPCODE already exists it is the list head (see
     opcode_has_operand_error_p); otherwise RECORD is replaced below.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: reuse a node from the free list when
	 available, otherwise allocate a fresh one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or initialize) the record's detail with the new error.  */
  record->detail = new_record->detail;
}
5232
5233 static inline void
5234 record_operand_error_info (const aarch64_opcode *opcode,
5235 aarch64_operand_error *error_info)
5236 {
5237 operand_error_record record;
5238 record.opcode = opcode;
5239 record.detail = *error_info;
5240 add_operand_error_record (&record);
5241 }
5242
5243 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5244 error message *ERROR, for operand IDX (count from 0). */
5245
5246 static void
5247 record_operand_error (const aarch64_opcode *opcode, int idx,
5248 enum aarch64_operand_error_kind kind,
5249 const char* error)
5250 {
5251 aarch64_operand_error info;
5252 memset(&info, 0, sizeof (info));
5253 info.index = idx;
5254 info.kind = kind;
5255 info.error = error;
5256 info.non_fatal = false;
5257 record_operand_error_info (opcode, &info);
5258 }
5259
5260 static void
5261 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5262 enum aarch64_operand_error_kind kind,
5263 const char* error, const int *extra_data)
5264 {
5265 aarch64_operand_error info;
5266 info.index = idx;
5267 info.kind = kind;
5268 info.error = error;
5269 info.data[0].i = extra_data[0];
5270 info.data[1].i = extra_data[1];
5271 info.data[2].i = extra_data[2];
5272 info.non_fatal = false;
5273 record_operand_error_info (opcode, &info);
5274 }
5275
5276 static void
5277 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5278 const char* error, int lower_bound,
5279 int upper_bound)
5280 {
5281 int data[3] = {lower_bound, upper_bound, 0};
5282 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5283 error, data);
5284 }
5285
5286 /* Remove the operand error record for *OPCODE. */
5287 static void ATTRIBUTE_UNUSED
5288 remove_operand_error_record (const aarch64_opcode *opcode)
5289 {
5290 if (opcode_has_operand_error_p (opcode))
5291 {
5292 operand_error_record* record = operand_error_report.head;
5293 gas_assert (record != NULL && operand_error_report.tail != NULL);
5294 operand_error_report.head = record->next;
5295 record->next = free_opnd_error_record_nodes;
5296 free_opnd_error_record_nodes = record;
5297 if (operand_error_report.head == NULL)
5298 {
5299 gas_assert (operand_error_report.tail == record);
5300 operand_error_report.tail = NULL;
5301 }
5302 }
5303 }
5304
5305 /* Given the instruction in *INSTR, return the index of the best matched
5306 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5307
5308 Return -1 if there is no qualifier sequence; return the first match
5309 if there is multiple matches found. */
5310
static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes have much fewer patterns in the list; the first
	 empty (all-NIL) sequence marks the end of the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5354
5355 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5356 corresponding operands in *INSTR. */
5357
5358 static inline void
5359 assign_qualifier_sequence (aarch64_inst *instr,
5360 const aarch64_opnd_qualifier_t *qualifiers)
5361 {
5362 int i = 0;
5363 int num_opnds = aarch64_num_of_operands (instr->opcode);
5364 gas_assert (num_opnds);
5365 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5366 instr->operands[i].qualifier = *qualifiers;
5367 }
5368
5369 /* Callback used by aarch64_print_operand to apply STYLE to the
5370 disassembler output created from FMT and ARGS. The STYLER object holds
5371 any required state. Must return a pointer to a string (created from FMT
5372 and ARGS) that will continue to be valid until the complete disassembled
5373 instruction has been printed.
5374
5375 We don't currently add any styling to the output of the disassembler as
5376 used within assembler error messages, and so STYLE is ignored here. A
5377 new string is allocated on the obstack help within STYLER and returned
5378 to the caller. */
5379
static const char *aarch64_apply_style
  (struct aarch64_styler *styler,
   enum disassembler_style style ATTRIBUTE_UNUSED,
   const char *fmt, va_list args)
{
  int res;
  char *ptr;
  /* STYLER->state holds the obstack the result string lives on.  */
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  A copy of ARGS is consumed by this
     sizing pass, leaving the original intact for the formatting pass
     below.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5403
5404 /* Print operands for the diagnosis purpose. */
5405
/* Append the printed form of OPNDS (per OPCODE) to BUF, which the
   caller must have sized large enough and already NUL-terminated.  */
static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  /* Release all strings aarch64_apply_style allocated on the obstack.  */
  obstack_free (&content, NULL);
}
5455
5456 /* Send to stderr a string as information. */
5457
/* Send to stderr a string as information, prefixed with the current
   source location when it is known.  */
static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }

  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5479
5480 /* Output one operand error record. */
5481
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 when the error is not tied to a specific operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal errors are demoted from errors to warnings.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst: re-parse the operand text after the mnemonic.
	     Parsing must succeed here -- only the encoding failed.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the valid lower/upper bounds.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] carries the expected number of registers.  */
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5675
5676 /* Process and output the error message about the operand mismatching.
5677
5678 When this function is called, the operand error information had
5679 been collected for an assembly line and there will be multiple
5680 errors in the case of multiple instruction templates; output the
5681 error message that most closely describes the problem.
5682
5683 The errors to be printed can be filtered on printing all errors
5684 or only non-fatal errors. This distinction has to be made because
5685 the error buffer may already be filled with fatal errors we don't want to
5686 print due to the different instruction templates. */
5687
static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* Errors filtered out by NON_FATAL_ONLY do not take part in the
	 severity comparison.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5773 \f
5774 /* Write an AARCH64 instruction to buf - always little-endian. */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first (little-endian).  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5784
/* Read a 32-bit little-endian instruction word back from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in most-significant first, so no per-byte shifts
     or widening casts are needed.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5794
/* Emit the currently-assembled instruction (global INST) into the output
   frag, creating a relocation fixup for it if one is pending.  NEW_INST,
   when non-NULL, is attached to the fixup so md_apply_fix() can re-encode
   the instruction once the relocated value is known.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one 4-byte instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Mark the frag as holding code; consulted by the target frag hooks.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand index and flags so the
	     target code can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5828
5829 /* Link together opcodes of the same name. */
5830
struct templates
{
  /* One opcode entry for this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry sharing the same mnemonic name, or NULL.  */
  struct templates *next;
};

typedef struct templates templates;
5838
5839 static templates *
5840 lookup_mnemonic (const char *start, int len)
5841 {
5842 templates *templ = NULL;
5843
5844 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5845 return templ;
5846 }
5847
5848 /* Subroutine of md_assemble, responsible for looking up the primary
5849 opcode from the mnemonic the user wrote. BASE points to the beginning
5850 of the mnemonic, DOT points to the first '.' within the mnemonic
5851 (if any) and END points to the end of the mnemonic. */
5852
5853 static templates *
5854 opcode_lookup (char *base, char *dot, char *end)
5855 {
5856 const aarch64_cond *cond;
5857 char condname[16];
5858 int len;
5859
5860 if (dot == end)
5861 return 0;
5862
5863 inst.cond = COND_ALWAYS;
5864
5865 /* Handle a possible condition. */
5866 if (dot)
5867 {
5868 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5869 if (!cond)
5870 return 0;
5871 inst.cond = cond->value;
5872 len = dot - base;
5873 }
5874 else
5875 len = end - base;
5876
5877 if (inst.cond == COND_ALWAYS)
5878 {
5879 /* Look for unaffixed mnemonic. */
5880 return lookup_mnemonic (base, len);
5881 }
5882 else if (len <= 13)
5883 {
5884 /* append ".c" to mnemonic if conditional */
5885 memcpy (condname, base, len);
5886 memcpy (condname + len, ".c", 2);
5887 base = condname;
5888 len += 2;
5889 return lookup_mnemonic (base, len);
5890 }
5891
5892 return NULL;
5893 }
5894
5895 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5896 to a corresponding operand qualifier. */
5897
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier of each element type's family of vector qualifiers;
     the final qualifier is at a width-dependent offset from this base.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate qualifiers: zeroing and merging.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register; relies on the QLF_S_* qualifiers being
	 laid out in the same order as the NT_* element types.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-bit, 64-bit and 128-bit total arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5969
5970 /* Process an optional operand that is found omitted from the assembly line.
5971 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5972 instruction's opcode entry while IDX is the index of this omitted operand.
5973 */
5974
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* Each opcode entry supplies a single default value for its (single)
     optional operand; how it is stored in OPERAND depends on TYPE.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register-lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled pattern: an omitted multiplier means "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate needs no relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Table-backed operands: the default indexes the option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6068
6069 /* Process the relocation type for move wide instructions.
6070 Return TRUE on success; otherwise return FALSE. */
6071
static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* 32-bit destination register restricts which "group" relocations
     (G2/G3) are usable below.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not be used with signed or PREL group relocations; those
     are meaningful only for MOVZ/MOVN sequences.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Derive the implicit LSL amount from the relocation's group number:
     G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Bits 32+ do not exist in a 32-bit destination.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6170
6171 /* A primitive log calculator. */
6172
/* Return log2 (SIZE) for SIZE in {1, 2, 4, 8, 16}; assert on anything
   else.  SIZE is an operand element size in bytes.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] is log2 (SIZE); (unsigned char) -1 marks sizes that are
     not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: the previous check only
     caught the latter, so SIZE == 0 would have read ls[-1] (undefined
     behaviour) before the assertion below could fire.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6186
6187 /* Determine and return the real reloc type code for an instruction
6188 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6189
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Real relocation indexed by [pseudo-reloc family][log2 (access size)];
     the TLS families have no 128-bit variant, hence the NONE entries.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 has no qualifier yet, infer the expected one from the
     opcode's qualifier list and operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* TLS families top out at 64-bit accesses (see the NONE rows above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6274
6275 /* Check whether a register list REGINFO is valid. The registers must be
6276 numbered in increasing order (modulo 32), in increments of one or two.
6277
6278 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
6279 increments of two.
6280
6281 Return FALSE if such a register list is invalid, otherwise return TRUE. */
6282
static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t i, count, expected, step;

  /* REGINFO packs the count minus one in its low 2 bits, followed by
     5-bit register numbers, lowest-numbered field first.  */
  count = (reginfo & 0x3) + 1;
  expected = (reginfo >> 2) & 0x1f;
  step = accept_alternate ? 2 : 1;

  /* Each subsequent register must follow its predecessor by STEP,
     wrapping around at 32.  */
  for (i = 1; i < count; i++)
    {
      expected = (expected + step) & 0x1f;
      if (((reginfo >> (2 + 5 * i)) & 0x1f) != expected)
	return false;
    }

  return true;
}
6305
6306 /* Generic instruction operand parser. This does no encoding and no
6307 semantic validation; it merely squirrels values away in the inst
6308 structure. Returns TRUE or FALSE depending on whether the
6309 specified grammar matched. */
6310
6311 static bool
6312 parse_operands (char *str, const aarch64_opcode *opcode)
6313 {
6314 int i;
6315 char *backtrack_pos = 0;
6316 const enum aarch64_opnd *operands = opcode->operands;
6317 aarch64_reg_type imm_reg_type;
6318
6319 clear_error ();
6320 skip_whitespace (str);
6321
6322 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6323 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6324 else
6325 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6326
6327 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6328 {
6329 int64_t val;
6330 const reg_entry *reg;
6331 int comma_skipped_p = 0;
6332 aarch64_reg_type rtype;
6333 struct vector_type_el vectype;
6334 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6335 aarch64_opnd_info *info = &inst.base.operands[i];
6336 aarch64_reg_type reg_type;
6337
6338 DEBUG_TRACE ("parse operand %d", i);
6339
6340 /* Assign the operand code. */
6341 info->type = operands[i];
6342
6343 if (optional_operand_p (opcode, i))
6344 {
6345 /* Remember where we are in case we need to backtrack. */
6346 gas_assert (!backtrack_pos);
6347 backtrack_pos = str;
6348 }
6349
6350 /* Expect comma between operands; the backtrack mechanism will take
6351 care of cases of omitted optional operand. */
6352 if (i > 0 && ! skip_past_char (&str, ','))
6353 {
6354 set_syntax_error (_("comma expected between operands"));
6355 goto failure;
6356 }
6357 else
6358 comma_skipped_p = 1;
6359
6360 switch (operands[i])
6361 {
6362 case AARCH64_OPND_Rd:
6363 case AARCH64_OPND_Rn:
6364 case AARCH64_OPND_Rm:
6365 case AARCH64_OPND_Rt:
6366 case AARCH64_OPND_Rt2:
6367 case AARCH64_OPND_Rs:
6368 case AARCH64_OPND_Ra:
6369 case AARCH64_OPND_Rt_LS64:
6370 case AARCH64_OPND_Rt_SYS:
6371 case AARCH64_OPND_PAIRREG:
6372 case AARCH64_OPND_SVE_Rm:
6373 po_int_reg_or_fail (REG_TYPE_R_Z);
6374
6375 /* In LS64 load/store instructions Rt register number must be even
6376 and <=22. */
6377 if (operands[i] == AARCH64_OPND_Rt_LS64)
6378 {
6379 /* We've already checked if this is valid register.
6380 This will check if register number (Rt) is not undefined for LS64
6381 instructions:
6382 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6383 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6384 {
6385 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6386 goto failure;
6387 }
6388 }
6389 break;
6390
6391 case AARCH64_OPND_Rd_SP:
6392 case AARCH64_OPND_Rn_SP:
6393 case AARCH64_OPND_Rt_SP:
6394 case AARCH64_OPND_SVE_Rn_SP:
6395 case AARCH64_OPND_Rm_SP:
6396 po_int_reg_or_fail (REG_TYPE_R_SP);
6397 break;
6398
6399 case AARCH64_OPND_Rm_EXT:
6400 case AARCH64_OPND_Rm_SFT:
6401 po_misc_or_fail (parse_shifter_operand
6402 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6403 ? SHIFTED_ARITH_IMM
6404 : SHIFTED_LOGIC_IMM)));
6405 if (!info->shifter.operator_present)
6406 {
6407 /* Default to LSL if not present. Libopcodes prefers shifter
6408 kind to be explicit. */
6409 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6410 info->shifter.kind = AARCH64_MOD_LSL;
6411 /* For Rm_EXT, libopcodes will carry out further check on whether
6412 or not stack pointer is used in the instruction (Recall that
6413 "the extend operator is not optional unless at least one of
6414 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6415 }
6416 break;
6417
6418 case AARCH64_OPND_Fd:
6419 case AARCH64_OPND_Fn:
6420 case AARCH64_OPND_Fm:
6421 case AARCH64_OPND_Fa:
6422 case AARCH64_OPND_Ft:
6423 case AARCH64_OPND_Ft2:
6424 case AARCH64_OPND_Sd:
6425 case AARCH64_OPND_Sn:
6426 case AARCH64_OPND_Sm:
6427 case AARCH64_OPND_SVE_VZn:
6428 case AARCH64_OPND_SVE_Vd:
6429 case AARCH64_OPND_SVE_Vm:
6430 case AARCH64_OPND_SVE_Vn:
6431 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6432 if (val == PARSE_FAIL)
6433 {
6434 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6435 goto failure;
6436 }
6437 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6438
6439 info->reg.regno = val;
6440 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6441 break;
6442
6443 case AARCH64_OPND_SVE_Pd:
6444 case AARCH64_OPND_SVE_Pg3:
6445 case AARCH64_OPND_SVE_Pg4_5:
6446 case AARCH64_OPND_SVE_Pg4_10:
6447 case AARCH64_OPND_SVE_Pg4_16:
6448 case AARCH64_OPND_SVE_Pm:
6449 case AARCH64_OPND_SVE_Pn:
6450 case AARCH64_OPND_SVE_Pt:
6451 case AARCH64_OPND_SME_Pm:
6452 reg_type = REG_TYPE_PN;
6453 goto vector_reg;
6454
6455 case AARCH64_OPND_SVE_Za_5:
6456 case AARCH64_OPND_SVE_Za_16:
6457 case AARCH64_OPND_SVE_Zd:
6458 case AARCH64_OPND_SVE_Zm_5:
6459 case AARCH64_OPND_SVE_Zm_16:
6460 case AARCH64_OPND_SVE_Zn:
6461 case AARCH64_OPND_SVE_Zt:
6462 reg_type = REG_TYPE_ZN;
6463 goto vector_reg;
6464
6465 case AARCH64_OPND_Va:
6466 case AARCH64_OPND_Vd:
6467 case AARCH64_OPND_Vn:
6468 case AARCH64_OPND_Vm:
6469 reg_type = REG_TYPE_VN;
6470 vector_reg:
6471 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6472 if (val == PARSE_FAIL)
6473 {
6474 first_error (_(get_reg_expected_msg (reg_type)));
6475 goto failure;
6476 }
6477 if (vectype.defined & NTA_HASINDEX)
6478 goto failure;
6479
6480 info->reg.regno = val;
6481 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6482 && vectype.type == NT_invtype)
6483 /* Unqualified Pn and Zn registers are allowed in certain
6484 contexts. Rely on F_STRICT qualifier checking to catch
6485 invalid uses. */
6486 info->qualifier = AARCH64_OPND_QLF_NIL;
6487 else
6488 {
6489 info->qualifier = vectype_to_qualifier (&vectype);
6490 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6491 goto failure;
6492 }
6493 break;
6494
6495 case AARCH64_OPND_VdD1:
6496 case AARCH64_OPND_VnD1:
6497 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6498 if (val == PARSE_FAIL)
6499 {
6500 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6501 goto failure;
6502 }
6503 if (vectype.type != NT_d || vectype.index != 1)
6504 {
6505 set_fatal_syntax_error
6506 (_("the top half of a 128-bit FP/SIMD register is expected"));
6507 goto failure;
6508 }
6509 info->reg.regno = val;
6510 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6511 here; it is correct for the purpose of encoding/decoding since
6512 only the register number is explicitly encoded in the related
6513 instructions, although this appears a bit hacky. */
6514 info->qualifier = AARCH64_OPND_QLF_S_D;
6515 break;
6516
6517 case AARCH64_OPND_SVE_Zm3_INDEX:
6518 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6519 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6520 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6521 case AARCH64_OPND_SVE_Zm4_INDEX:
6522 case AARCH64_OPND_SVE_Zn_INDEX:
6523 reg_type = REG_TYPE_ZN;
6524 goto vector_reg_index;
6525
6526 case AARCH64_OPND_Ed:
6527 case AARCH64_OPND_En:
6528 case AARCH64_OPND_Em:
6529 case AARCH64_OPND_Em16:
6530 case AARCH64_OPND_SM3_IMM2:
6531 reg_type = REG_TYPE_VN;
6532 vector_reg_index:
6533 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6534 if (val == PARSE_FAIL)
6535 {
6536 first_error (_(get_reg_expected_msg (reg_type)));
6537 goto failure;
6538 }
6539 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6540 goto failure;
6541
6542 info->reglane.regno = val;
6543 info->reglane.index = vectype.index;
6544 info->qualifier = vectype_to_qualifier (&vectype);
6545 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6546 goto failure;
6547 break;
6548
6549 case AARCH64_OPND_SVE_ZnxN:
6550 case AARCH64_OPND_SVE_ZtxN:
6551 reg_type = REG_TYPE_ZN;
6552 goto vector_reg_list;
6553
6554 case AARCH64_OPND_LVn:
6555 case AARCH64_OPND_LVt:
6556 case AARCH64_OPND_LVt_AL:
6557 case AARCH64_OPND_LEt:
6558 reg_type = REG_TYPE_VN;
6559 vector_reg_list:
6560 if (reg_type == REG_TYPE_ZN
6561 && get_opcode_dependent_value (opcode) == 1
6562 && *str != '{')
6563 {
6564 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6565 if (val == PARSE_FAIL)
6566 {
6567 first_error (_(get_reg_expected_msg (reg_type)));
6568 goto failure;
6569 }
6570 info->reglist.first_regno = val;
6571 info->reglist.num_regs = 1;
6572 }
6573 else
6574 {
6575 val = parse_vector_reg_list (&str, reg_type, &vectype);
6576 if (val == PARSE_FAIL)
6577 goto failure;
6578
6579 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6580 {
6581 set_fatal_syntax_error (_("invalid register list"));
6582 goto failure;
6583 }
6584
6585 if (vectype.width != 0 && *str != ',')
6586 {
6587 set_fatal_syntax_error
6588 (_("expected element type rather than vector type"));
6589 goto failure;
6590 }
6591
6592 info->reglist.first_regno = (val >> 2) & 0x1f;
6593 info->reglist.num_regs = (val & 0x3) + 1;
6594 }
6595 if (operands[i] == AARCH64_OPND_LEt)
6596 {
6597 if (!(vectype.defined & NTA_HASINDEX))
6598 goto failure;
6599 info->reglist.has_index = 1;
6600 info->reglist.index = vectype.index;
6601 }
6602 else
6603 {
6604 if (vectype.defined & NTA_HASINDEX)
6605 goto failure;
6606 if (!(vectype.defined & NTA_HASTYPE))
6607 {
6608 if (reg_type == REG_TYPE_ZN)
6609 set_fatal_syntax_error (_("missing type suffix"));
6610 goto failure;
6611 }
6612 }
6613 info->qualifier = vectype_to_qualifier (&vectype);
6614 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6615 goto failure;
6616 break;
6617
6618 case AARCH64_OPND_CRn:
6619 case AARCH64_OPND_CRm:
6620 {
6621 char prefix = *(str++);
6622 if (prefix != 'c' && prefix != 'C')
6623 goto failure;
6624
6625 po_imm_nc_or_fail ();
6626 if (val > 15)
6627 {
6628 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6629 goto failure;
6630 }
6631 info->qualifier = AARCH64_OPND_QLF_CR;
6632 info->imm.value = val;
6633 break;
6634 }
6635
6636 case AARCH64_OPND_SHLL_IMM:
6637 case AARCH64_OPND_IMM_VLSR:
6638 po_imm_or_fail (1, 64);
6639 info->imm.value = val;
6640 break;
6641
6642 case AARCH64_OPND_CCMP_IMM:
6643 case AARCH64_OPND_SIMM5:
6644 case AARCH64_OPND_FBITS:
6645 case AARCH64_OPND_TME_UIMM16:
6646 case AARCH64_OPND_UIMM4:
6647 case AARCH64_OPND_UIMM4_ADDG:
6648 case AARCH64_OPND_UIMM10:
6649 case AARCH64_OPND_UIMM3_OP1:
6650 case AARCH64_OPND_UIMM3_OP2:
6651 case AARCH64_OPND_IMM_VLSL:
6652 case AARCH64_OPND_IMM:
6653 case AARCH64_OPND_IMM_2:
6654 case AARCH64_OPND_WIDTH:
6655 case AARCH64_OPND_SVE_INV_LIMM:
6656 case AARCH64_OPND_SVE_LIMM:
6657 case AARCH64_OPND_SVE_LIMM_MOV:
6658 case AARCH64_OPND_SVE_SHLIMM_PRED:
6659 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6660 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6661 case AARCH64_OPND_SVE_SHRIMM_PRED:
6662 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6663 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6664 case AARCH64_OPND_SVE_SIMM5:
6665 case AARCH64_OPND_SVE_SIMM5B:
6666 case AARCH64_OPND_SVE_SIMM6:
6667 case AARCH64_OPND_SVE_SIMM8:
6668 case AARCH64_OPND_SVE_UIMM3:
6669 case AARCH64_OPND_SVE_UIMM7:
6670 case AARCH64_OPND_SVE_UIMM8:
6671 case AARCH64_OPND_SVE_UIMM8_53:
6672 case AARCH64_OPND_IMM_ROT1:
6673 case AARCH64_OPND_IMM_ROT2:
6674 case AARCH64_OPND_IMM_ROT3:
6675 case AARCH64_OPND_SVE_IMM_ROT1:
6676 case AARCH64_OPND_SVE_IMM_ROT2:
6677 case AARCH64_OPND_SVE_IMM_ROT3:
6678 case AARCH64_OPND_CSSC_SIMM8:
6679 case AARCH64_OPND_CSSC_UIMM8:
6680 po_imm_nc_or_fail ();
6681 info->imm.value = val;
6682 break;
6683
6684 case AARCH64_OPND_SVE_AIMM:
6685 case AARCH64_OPND_SVE_ASIMM:
6686 po_imm_nc_or_fail ();
6687 info->imm.value = val;
6688 skip_whitespace (str);
6689 if (skip_past_comma (&str))
6690 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6691 else
6692 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6693 break;
6694
6695 case AARCH64_OPND_SVE_PATTERN:
6696 po_enum_or_fail (aarch64_sve_pattern_array);
6697 info->imm.value = val;
6698 break;
6699
6700 case AARCH64_OPND_SVE_PATTERN_SCALED:
6701 po_enum_or_fail (aarch64_sve_pattern_array);
6702 info->imm.value = val;
6703 if (skip_past_comma (&str)
6704 && !parse_shift (&str, info, SHIFTED_MUL))
6705 goto failure;
6706 if (!info->shifter.operator_present)
6707 {
6708 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6709 info->shifter.kind = AARCH64_MOD_MUL;
6710 info->shifter.amount = 1;
6711 }
6712 break;
6713
6714 case AARCH64_OPND_SVE_PRFOP:
6715 po_enum_or_fail (aarch64_sve_prfop_array);
6716 info->imm.value = val;
6717 break;
6718
6719 case AARCH64_OPND_UIMM7:
6720 po_imm_or_fail (0, 127);
6721 info->imm.value = val;
6722 break;
6723
6724 case AARCH64_OPND_IDX:
6725 case AARCH64_OPND_MASK:
6726 case AARCH64_OPND_BIT_NUM:
6727 case AARCH64_OPND_IMMR:
6728 case AARCH64_OPND_IMMS:
6729 po_imm_or_fail (0, 63);
6730 info->imm.value = val;
6731 break;
6732
6733 case AARCH64_OPND_IMM0:
6734 po_imm_nc_or_fail ();
6735 if (val != 0)
6736 {
6737 set_fatal_syntax_error (_("immediate zero expected"));
6738 goto failure;
6739 }
6740 info->imm.value = 0;
6741 break;
6742
6743 case AARCH64_OPND_FPIMM0:
6744 {
6745 int qfloat;
6746 bool res1 = false, res2 = false;
6747 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6748 it is probably not worth the effort to support it. */
6749 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6750 imm_reg_type))
6751 && (error_p ()
6752 || !(res2 = parse_constant_immediate (&str, &val,
6753 imm_reg_type))))
6754 goto failure;
6755 if ((res1 && qfloat == 0) || (res2 && val == 0))
6756 {
6757 info->imm.value = 0;
6758 info->imm.is_fp = 1;
6759 break;
6760 }
6761 set_fatal_syntax_error (_("immediate zero expected"));
6762 goto failure;
6763 }
6764
6765 case AARCH64_OPND_IMM_MOV:
6766 {
6767 char *saved = str;
6768 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6769 reg_name_p (str, REG_TYPE_VN))
6770 goto failure;
6771 str = saved;
6772 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6773 GE_OPT_PREFIX, REJECT_ABSENT));
6774 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6775 later. fix_mov_imm_insn will try to determine a machine
6776 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6777 message if the immediate cannot be moved by a single
6778 instruction. */
6779 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6780 inst.base.operands[i].skip = 1;
6781 }
6782 break;
6783
6784 case AARCH64_OPND_SIMD_IMM:
6785 case AARCH64_OPND_SIMD_IMM_SFT:
6786 if (! parse_big_immediate (&str, &val, imm_reg_type))
6787 goto failure;
6788 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6789 /* addr_off_p */ 0,
6790 /* need_libopcodes_p */ 1,
6791 /* skip_p */ 1);
6792 /* Parse shift.
6793 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6794 shift, we don't check it here; we leave the checking to
6795 the libopcodes (operand_general_constraint_met_p). By
6796 doing this, we achieve better diagnostics. */
6797 if (skip_past_comma (&str)
6798 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6799 goto failure;
6800 if (!info->shifter.operator_present
6801 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6802 {
6803 /* Default to LSL if not present. Libopcodes prefers shifter
6804 kind to be explicit. */
6805 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6806 info->shifter.kind = AARCH64_MOD_LSL;
6807 }
6808 break;
6809
6810 case AARCH64_OPND_FPIMM:
6811 case AARCH64_OPND_SIMD_FPIMM:
6812 case AARCH64_OPND_SVE_FPIMM8:
6813 {
6814 int qfloat;
6815 bool dp_p;
6816
6817 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6818 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6819 || !aarch64_imm_float_p (qfloat))
6820 {
6821 if (!error_p ())
6822 set_fatal_syntax_error (_("invalid floating-point"
6823 " constant"));
6824 goto failure;
6825 }
6826 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6827 inst.base.operands[i].imm.is_fp = 1;
6828 }
6829 break;
6830
6831 case AARCH64_OPND_SVE_I1_HALF_ONE:
6832 case AARCH64_OPND_SVE_I1_HALF_TWO:
6833 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6834 {
6835 int qfloat;
6836 bool dp_p;
6837
6838 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6839 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6840 {
6841 if (!error_p ())
6842 set_fatal_syntax_error (_("invalid floating-point"
6843 " constant"));
6844 goto failure;
6845 }
6846 inst.base.operands[i].imm.value = qfloat;
6847 inst.base.operands[i].imm.is_fp = 1;
6848 }
6849 break;
6850
6851 case AARCH64_OPND_LIMM:
6852 po_misc_or_fail (parse_shifter_operand (&str, info,
6853 SHIFTED_LOGIC_IMM));
6854 if (info->shifter.operator_present)
6855 {
6856 set_fatal_syntax_error
6857 (_("shift not allowed for bitmask immediate"));
6858 goto failure;
6859 }
6860 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6861 /* addr_off_p */ 0,
6862 /* need_libopcodes_p */ 1,
6863 /* skip_p */ 1);
6864 break;
6865
6866 case AARCH64_OPND_AIMM:
6867 if (opcode->op == OP_ADD)
6868 /* ADD may have relocation types. */
6869 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6870 SHIFTED_ARITH_IMM));
6871 else
6872 po_misc_or_fail (parse_shifter_operand (&str, info,
6873 SHIFTED_ARITH_IMM));
6874 switch (inst.reloc.type)
6875 {
6876 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6877 info->shifter.amount = 12;
6878 break;
6879 case BFD_RELOC_UNUSED:
6880 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6881 if (info->shifter.kind != AARCH64_MOD_NONE)
6882 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6883 inst.reloc.pc_rel = 0;
6884 break;
6885 default:
6886 break;
6887 }
6888 info->imm.value = 0;
6889 if (!info->shifter.operator_present)
6890 {
6891 /* Default to LSL if not present. Libopcodes prefers shifter
6892 kind to be explicit. */
6893 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6894 info->shifter.kind = AARCH64_MOD_LSL;
6895 }
6896 break;
6897
6898 case AARCH64_OPND_HALF:
6899 {
6900 /* #<imm16> or relocation. */
6901 int internal_fixup_p;
6902 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6903 if (internal_fixup_p)
6904 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6905 skip_whitespace (str);
6906 if (skip_past_comma (&str))
6907 {
6908 /* {, LSL #<shift>} */
6909 if (! aarch64_gas_internal_fixup_p ())
6910 {
6911 set_fatal_syntax_error (_("can't mix relocation modifier "
6912 "with explicit shift"));
6913 goto failure;
6914 }
6915 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6916 }
6917 else
6918 inst.base.operands[i].shifter.amount = 0;
6919 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6920 inst.base.operands[i].imm.value = 0;
6921 if (! process_movw_reloc_info ())
6922 goto failure;
6923 }
6924 break;
6925
6926 case AARCH64_OPND_EXCEPTION:
6927 case AARCH64_OPND_UNDEFINED:
6928 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6929 imm_reg_type));
6930 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6931 /* addr_off_p */ 0,
6932 /* need_libopcodes_p */ 0,
6933 /* skip_p */ 1);
6934 break;
6935
6936 case AARCH64_OPND_NZCV:
6937 {
6938 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6939 if (nzcv != NULL)
6940 {
6941 str += 4;
6942 info->imm.value = nzcv->value;
6943 break;
6944 }
6945 po_imm_or_fail (0, 15);
6946 info->imm.value = val;
6947 }
6948 break;
6949
6950 case AARCH64_OPND_COND:
6951 case AARCH64_OPND_COND1:
6952 {
6953 char *start = str;
6954 do
6955 str++;
6956 while (ISALPHA (*str));
6957 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6958 if (info->cond == NULL)
6959 {
6960 set_syntax_error (_("invalid condition"));
6961 goto failure;
6962 }
6963 else if (operands[i] == AARCH64_OPND_COND1
6964 && (info->cond->value & 0xe) == 0xe)
6965 {
6966 /* Do not allow AL or NV. */
6967 set_default_error ();
6968 goto failure;
6969 }
6970 }
6971 break;
6972
6973 case AARCH64_OPND_ADDR_ADRP:
6974 po_misc_or_fail (parse_adrp (&str));
6975 /* Clear the value as operand needs to be relocated. */
6976 info->imm.value = 0;
6977 break;
6978
6979 case AARCH64_OPND_ADDR_PCREL14:
6980 case AARCH64_OPND_ADDR_PCREL19:
6981 case AARCH64_OPND_ADDR_PCREL21:
6982 case AARCH64_OPND_ADDR_PCREL26:
6983 po_misc_or_fail (parse_address (&str, info));
6984 if (!info->addr.pcrel)
6985 {
6986 set_syntax_error (_("invalid pc-relative address"));
6987 goto failure;
6988 }
6989 if (inst.gen_lit_pool
6990 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6991 {
6992 /* Only permit "=value" in the literal load instructions.
6993 The literal will be generated by programmer_friendly_fixup. */
6994 set_syntax_error (_("invalid use of \"=immediate\""));
6995 goto failure;
6996 }
6997 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6998 {
6999 set_syntax_error (_("unrecognized relocation suffix"));
7000 goto failure;
7001 }
7002 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7003 {
7004 info->imm.value = inst.reloc.exp.X_add_number;
7005 inst.reloc.type = BFD_RELOC_UNUSED;
7006 }
7007 else
7008 {
7009 info->imm.value = 0;
7010 if (inst.reloc.type == BFD_RELOC_UNUSED)
7011 switch (opcode->iclass)
7012 {
7013 case compbranch:
7014 case condbranch:
7015 /* e.g. CBZ or B.COND */
7016 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7017 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7018 break;
7019 case testbranch:
7020 /* e.g. TBZ */
7021 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7022 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7023 break;
7024 case branch_imm:
7025 /* e.g. B or BL */
7026 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7027 inst.reloc.type =
7028 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7029 : BFD_RELOC_AARCH64_JUMP26;
7030 break;
7031 case loadlit:
7032 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7033 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7034 break;
7035 case pcreladdr:
7036 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7037 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7038 break;
7039 default:
7040 gas_assert (0);
7041 abort ();
7042 }
7043 inst.reloc.pc_rel = 1;
7044 }
7045 break;
7046
7047 case AARCH64_OPND_ADDR_SIMPLE:
7048 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7049 {
7050 /* [<Xn|SP>{, #<simm>}] */
7051 char *start = str;
7052 /* First use the normal address-parsing routines, to get
7053 the usual syntax errors. */
7054 po_misc_or_fail (parse_address (&str, info));
7055 if (info->addr.pcrel || info->addr.offset.is_reg
7056 || !info->addr.preind || info->addr.postind
7057 || info->addr.writeback)
7058 {
7059 set_syntax_error (_("invalid addressing mode"));
7060 goto failure;
7061 }
7062
7063 /* Then retry, matching the specific syntax of these addresses. */
7064 str = start;
7065 po_char_or_fail ('[');
7066 po_reg_or_fail (REG_TYPE_R64_SP);
7067 /* Accept optional ", #0". */
7068 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7069 && skip_past_char (&str, ','))
7070 {
7071 skip_past_char (&str, '#');
7072 if (! skip_past_char (&str, '0'))
7073 {
7074 set_fatal_syntax_error
7075 (_("the optional immediate offset can only be 0"));
7076 goto failure;
7077 }
7078 }
7079 po_char_or_fail (']');
7080 break;
7081 }
7082
7083 case AARCH64_OPND_ADDR_REGOFF:
7084 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7085 po_misc_or_fail (parse_address (&str, info));
7086 regoff_addr:
7087 if (info->addr.pcrel || !info->addr.offset.is_reg
7088 || !info->addr.preind || info->addr.postind
7089 || info->addr.writeback)
7090 {
7091 set_syntax_error (_("invalid addressing mode"));
7092 goto failure;
7093 }
7094 if (!info->shifter.operator_present)
7095 {
7096 /* Default to LSL if not present. Libopcodes prefers shifter
7097 kind to be explicit. */
7098 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7099 info->shifter.kind = AARCH64_MOD_LSL;
7100 }
7101 /* Qualifier to be deduced by libopcodes. */
7102 break;
7103
7104 case AARCH64_OPND_ADDR_SIMM7:
7105 po_misc_or_fail (parse_address (&str, info));
7106 if (info->addr.pcrel || info->addr.offset.is_reg
7107 || (!info->addr.preind && !info->addr.postind))
7108 {
7109 set_syntax_error (_("invalid addressing mode"));
7110 goto failure;
7111 }
7112 if (inst.reloc.type != BFD_RELOC_UNUSED)
7113 {
7114 set_syntax_error (_("relocation not allowed"));
7115 goto failure;
7116 }
7117 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7118 /* addr_off_p */ 1,
7119 /* need_libopcodes_p */ 1,
7120 /* skip_p */ 0);
7121 break;
7122
7123 case AARCH64_OPND_ADDR_SIMM9:
7124 case AARCH64_OPND_ADDR_SIMM9_2:
7125 case AARCH64_OPND_ADDR_SIMM11:
7126 case AARCH64_OPND_ADDR_SIMM13:
7127 po_misc_or_fail (parse_address (&str, info));
7128 if (info->addr.pcrel || info->addr.offset.is_reg
7129 || (!info->addr.preind && !info->addr.postind)
7130 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7131 && info->addr.writeback))
7132 {
7133 set_syntax_error (_("invalid addressing mode"));
7134 goto failure;
7135 }
7136 if (inst.reloc.type != BFD_RELOC_UNUSED)
7137 {
7138 set_syntax_error (_("relocation not allowed"));
7139 goto failure;
7140 }
7141 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7142 /* addr_off_p */ 1,
7143 /* need_libopcodes_p */ 1,
7144 /* skip_p */ 0);
7145 break;
7146
7147 case AARCH64_OPND_ADDR_SIMM10:
7148 case AARCH64_OPND_ADDR_OFFSET:
7149 po_misc_or_fail (parse_address (&str, info));
7150 if (info->addr.pcrel || info->addr.offset.is_reg
7151 || !info->addr.preind || info->addr.postind)
7152 {
7153 set_syntax_error (_("invalid addressing mode"));
7154 goto failure;
7155 }
7156 if (inst.reloc.type != BFD_RELOC_UNUSED)
7157 {
7158 set_syntax_error (_("relocation not allowed"));
7159 goto failure;
7160 }
7161 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7162 /* addr_off_p */ 1,
7163 /* need_libopcodes_p */ 1,
7164 /* skip_p */ 0);
7165 break;
7166
7167 case AARCH64_OPND_ADDR_UIMM12:
7168 po_misc_or_fail (parse_address (&str, info));
7169 if (info->addr.pcrel || info->addr.offset.is_reg
7170 || !info->addr.preind || info->addr.writeback)
7171 {
7172 set_syntax_error (_("invalid addressing mode"));
7173 goto failure;
7174 }
7175 if (inst.reloc.type == BFD_RELOC_UNUSED)
7176 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7177 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7178 || (inst.reloc.type
7179 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7180 || (inst.reloc.type
7181 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7182 || (inst.reloc.type
7183 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7184 || (inst.reloc.type
7185 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7186 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7187 /* Leave qualifier to be determined by libopcodes. */
7188 break;
7189
7190 case AARCH64_OPND_SIMD_ADDR_POST:
7191 /* [<Xn|SP>], <Xm|#<amount>> */
7192 po_misc_or_fail (parse_address (&str, info));
7193 if (!info->addr.postind || !info->addr.writeback)
7194 {
7195 set_syntax_error (_("invalid addressing mode"));
7196 goto failure;
7197 }
7198 if (!info->addr.offset.is_reg)
7199 {
7200 if (inst.reloc.exp.X_op == O_constant)
7201 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7202 else
7203 {
7204 set_fatal_syntax_error
7205 (_("writeback value must be an immediate constant"));
7206 goto failure;
7207 }
7208 }
7209 /* No qualifier. */
7210 break;
7211
7212 case AARCH64_OPND_SME_SM_ZA:
7213 /* { SM | ZA } */
7214 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7215 {
7216 set_syntax_error (_("unknown or missing PSTATE field name"));
7217 goto failure;
7218 }
7219 info->reg.regno = val;
7220 break;
7221
7222 case AARCH64_OPND_SME_PnT_Wm_imm:
7223 /* <Pn>.<T>[<Wm>, #<imm>] */
7224 {
7225 int index_base_reg;
7226 int imm;
7227 val = parse_sme_pred_reg_with_index (&str,
7228 &index_base_reg,
7229 &imm,
7230 &qualifier);
7231 if (val == PARSE_FAIL)
7232 goto failure;
7233
7234 info->za_tile_vector.regno = val;
7235 info->za_tile_vector.index.regno = index_base_reg;
7236 info->za_tile_vector.index.imm = imm;
7237 info->qualifier = qualifier;
7238 break;
7239 }
7240
7241 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7242 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7243 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7244 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7245 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7246 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7247 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7248 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7249 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7250 case AARCH64_OPND_SVE_ADDR_RI_U6:
7251 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7252 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7253 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7254 /* [X<n>{, #imm, MUL VL}]
7255 [X<n>{, #imm}]
7256 but recognizing SVE registers. */
7257 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7258 &offset_qualifier));
7259 if (base_qualifier != AARCH64_OPND_QLF_X)
7260 {
7261 set_syntax_error (_("invalid addressing mode"));
7262 goto failure;
7263 }
7264 sve_regimm:
7265 if (info->addr.pcrel || info->addr.offset.is_reg
7266 || !info->addr.preind || info->addr.writeback)
7267 {
7268 set_syntax_error (_("invalid addressing mode"));
7269 goto failure;
7270 }
7271 if (inst.reloc.type != BFD_RELOC_UNUSED
7272 || inst.reloc.exp.X_op != O_constant)
7273 {
7274 /* Make sure this has priority over
7275 "invalid addressing mode". */
7276 set_fatal_syntax_error (_("constant offset required"));
7277 goto failure;
7278 }
7279 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7280 break;
7281
7282 case AARCH64_OPND_SVE_ADDR_R:
7283 /* [<Xn|SP>{, <R><m>}]
7284 but recognizing SVE registers. */
7285 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7286 &offset_qualifier));
7287 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7288 {
7289 offset_qualifier = AARCH64_OPND_QLF_X;
7290 info->addr.offset.is_reg = 1;
7291 info->addr.offset.regno = 31;
7292 }
7293 else if (base_qualifier != AARCH64_OPND_QLF_X
7294 || offset_qualifier != AARCH64_OPND_QLF_X)
7295 {
7296 set_syntax_error (_("invalid addressing mode"));
7297 goto failure;
7298 }
7299 goto regoff_addr;
7300
7301 case AARCH64_OPND_SVE_ADDR_RR:
7302 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7303 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7304 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7305 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7306 case AARCH64_OPND_SVE_ADDR_RX:
7307 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7308 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7309 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7310 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7311 but recognizing SVE registers. */
7312 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7313 &offset_qualifier));
7314 if (base_qualifier != AARCH64_OPND_QLF_X
7315 || offset_qualifier != AARCH64_OPND_QLF_X)
7316 {
7317 set_syntax_error (_("invalid addressing mode"));
7318 goto failure;
7319 }
7320 goto regoff_addr;
7321
7322 case AARCH64_OPND_SVE_ADDR_RZ:
7323 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7324 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7325 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7326 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7327 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7328 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7329 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7330 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7331 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7332 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7333 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7334 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7335 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7336 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7337 &offset_qualifier));
7338 if (base_qualifier != AARCH64_OPND_QLF_X
7339 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7340 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7341 {
7342 set_syntax_error (_("invalid addressing mode"));
7343 goto failure;
7344 }
7345 info->qualifier = offset_qualifier;
7346 goto regoff_addr;
7347
7348 case AARCH64_OPND_SVE_ADDR_ZX:
7349 /* [Zn.<T>{, <Xm>}]. */
7350 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7351 &offset_qualifier));
7352 /* Things to check:
7353 base_qualifier either S_S or S_D
7354 offset_qualifier must be X
7355 */
7356 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7357 && base_qualifier != AARCH64_OPND_QLF_S_D)
7358 || offset_qualifier != AARCH64_OPND_QLF_X)
7359 {
7360 set_syntax_error (_("invalid addressing mode"));
7361 goto failure;
7362 }
7363 info->qualifier = base_qualifier;
7364 if (!info->addr.offset.is_reg || info->addr.pcrel
7365 || !info->addr.preind || info->addr.writeback
7366 || info->shifter.operator_present != 0)
7367 {
7368 set_syntax_error (_("invalid addressing mode"));
7369 goto failure;
7370 }
7371 info->shifter.kind = AARCH64_MOD_LSL;
7372 break;
7373
7374
7375 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7376 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7377 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7378 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7379 /* [Z<n>.<T>{, #imm}] */
7380 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7381 &offset_qualifier));
7382 if (base_qualifier != AARCH64_OPND_QLF_S_S
7383 && base_qualifier != AARCH64_OPND_QLF_S_D)
7384 {
7385 set_syntax_error (_("invalid addressing mode"));
7386 goto failure;
7387 }
7388 info->qualifier = base_qualifier;
7389 goto sve_regimm;
7390
7391 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7392 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7393 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7394 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7395 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7396
7397 We don't reject:
7398
7399 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7400
7401 here since we get better error messages by leaving it to
7402 the qualifier checking routines. */
7403 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7404 &offset_qualifier));
7405 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7406 && base_qualifier != AARCH64_OPND_QLF_S_D)
7407 || offset_qualifier != base_qualifier)
7408 {
7409 set_syntax_error (_("invalid addressing mode"));
7410 goto failure;
7411 }
7412 info->qualifier = base_qualifier;
7413 goto regoff_addr;
7414
7415 case AARCH64_OPND_SYSREG:
7416 {
7417 uint32_t sysreg_flags;
7418 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7419 &sysreg_flags)) == PARSE_FAIL)
7420 {
7421 set_syntax_error (_("unknown or missing system register name"));
7422 goto failure;
7423 }
7424 inst.base.operands[i].sysreg.value = val;
7425 inst.base.operands[i].sysreg.flags = sysreg_flags;
7426 break;
7427 }
7428
7429 case AARCH64_OPND_PSTATEFIELD:
7430 {
7431 uint32_t sysreg_flags;
7432 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7433 &sysreg_flags)) == PARSE_FAIL)
7434 {
7435 set_syntax_error (_("unknown or missing PSTATE field name"));
7436 goto failure;
7437 }
7438 inst.base.operands[i].pstatefield = val;
7439 inst.base.operands[i].sysreg.flags = sysreg_flags;
7440 break;
7441 }
7442
7443 case AARCH64_OPND_SYSREG_IC:
7444 inst.base.operands[i].sysins_op =
7445 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7446 goto sys_reg_ins;
7447
7448 case AARCH64_OPND_SYSREG_DC:
7449 inst.base.operands[i].sysins_op =
7450 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7451 goto sys_reg_ins;
7452
7453 case AARCH64_OPND_SYSREG_AT:
7454 inst.base.operands[i].sysins_op =
7455 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7456 goto sys_reg_ins;
7457
7458 case AARCH64_OPND_SYSREG_SR:
7459 inst.base.operands[i].sysins_op =
7460 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7461 goto sys_reg_ins;
7462
7463 case AARCH64_OPND_SYSREG_TLBI:
7464 inst.base.operands[i].sysins_op =
7465 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7466 sys_reg_ins:
7467 if (inst.base.operands[i].sysins_op == NULL)
7468 {
7469 set_fatal_syntax_error ( _("unknown or missing operation name"));
7470 goto failure;
7471 }
7472 break;
7473
7474 case AARCH64_OPND_BARRIER:
7475 case AARCH64_OPND_BARRIER_ISB:
7476 val = parse_barrier (&str);
7477 if (val != PARSE_FAIL
7478 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7479 {
7480 /* ISB only accepts options name 'sy'. */
7481 set_syntax_error
7482 (_("the specified option is not accepted in ISB"));
7483 /* Turn off backtrack as this optional operand is present. */
7484 backtrack_pos = 0;
7485 goto failure;
7486 }
7487 if (val != PARSE_FAIL
7488 && operands[i] == AARCH64_OPND_BARRIER)
7489 {
7490 /* Regular barriers accept options CRm (C0-C15).
7491 DSB nXS barrier variant accepts values > 15. */
7492 if (val < 0 || val > 15)
7493 {
7494 set_syntax_error (_("the specified option is not accepted in DSB"));
7495 goto failure;
7496 }
7497 }
7498 /* This is an extension to accept a 0..15 immediate. */
7499 if (val == PARSE_FAIL)
7500 po_imm_or_fail (0, 15);
7501 info->barrier = aarch64_barrier_options + val;
7502 break;
7503
7504 case AARCH64_OPND_BARRIER_DSB_NXS:
7505 val = parse_barrier (&str);
7506 if (val != PARSE_FAIL)
7507 {
7508 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7509 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7510 {
7511 set_syntax_error (_("the specified option is not accepted in DSB"));
7512 /* Turn off backtrack as this optional operand is present. */
7513 backtrack_pos = 0;
7514 goto failure;
7515 }
7516 }
7517 else
7518 {
7519 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7520 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7521 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7522 goto failure;
7523 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7524 {
7525 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7526 goto failure;
7527 }
7528 }
7529 /* Option index is encoded as 2-bit value in val<3:2>. */
7530 val = (val >> 2) - 4;
7531 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7532 break;
7533
7534 case AARCH64_OPND_PRFOP:
7535 val = parse_pldop (&str);
7536 /* This is an extension to accept a 0..31 immediate. */
7537 if (val == PARSE_FAIL)
7538 po_imm_or_fail (0, 31);
7539 inst.base.operands[i].prfop = aarch64_prfops + val;
7540 break;
7541
7542 case AARCH64_OPND_BARRIER_PSB:
7543 val = parse_barrier_psb (&str, &(info->hint_option));
7544 if (val == PARSE_FAIL)
7545 goto failure;
7546 break;
7547
7548 case AARCH64_OPND_BTI_TARGET:
7549 val = parse_bti_operand (&str, &(info->hint_option));
7550 if (val == PARSE_FAIL)
7551 goto failure;
7552 break;
7553
7554 case AARCH64_OPND_SME_ZAda_2b:
7555 case AARCH64_OPND_SME_ZAda_3b:
7556 val = parse_sme_zada_operand (&str, &qualifier);
7557 if (val == PARSE_FAIL)
7558 goto failure;
7559 info->reg.regno = val;
7560 info->qualifier = qualifier;
7561 break;
7562
7563 case AARCH64_OPND_SME_ZA_HV_idx_src:
7564 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7565 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7566 {
7567 enum sme_hv_slice slice_indicator;
7568 int vector_select_register;
7569 int imm;
7570
7571 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7572 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7573 &slice_indicator,
7574 &vector_select_register,
7575 &imm,
7576 &qualifier);
7577 else
7578 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7579 &vector_select_register,
7580 &imm,
7581 &qualifier);
7582 if (val == PARSE_FAIL)
7583 goto failure;
7584 info->za_tile_vector.regno = val;
7585 info->za_tile_vector.index.regno = vector_select_register;
7586 info->za_tile_vector.index.imm = imm;
7587 info->za_tile_vector.v = slice_indicator;
7588 info->qualifier = qualifier;
7589 break;
7590 }
7591
7592 case AARCH64_OPND_SME_list_of_64bit_tiles:
7593 val = parse_sme_list_of_64bit_tiles (&str);
7594 if (val == PARSE_FAIL)
7595 goto failure;
7596 info->imm.value = val;
7597 break;
7598
7599 case AARCH64_OPND_SME_ZA_array:
7600 {
7601 int imm;
7602 val = parse_sme_za_array (&str, &imm);
7603 if (val == PARSE_FAIL)
7604 goto failure;
7605 info->za_tile_vector.index.regno = val;
7606 info->za_tile_vector.index.imm = imm;
7607 break;
7608 }
7609
7610 case AARCH64_OPND_MOPS_ADDR_Rd:
7611 case AARCH64_OPND_MOPS_ADDR_Rs:
7612 po_char_or_fail ('[');
7613 if (!parse_x0_to_x30 (&str, info))
7614 goto failure;
7615 po_char_or_fail (']');
7616 po_char_or_fail ('!');
7617 break;
7618
7619 case AARCH64_OPND_MOPS_WB_Rn:
7620 if (!parse_x0_to_x30 (&str, info))
7621 goto failure;
7622 po_char_or_fail ('!');
7623 break;
7624
7625 default:
7626 as_fatal (_("unhandled operand code %d"), operands[i]);
7627 }
7628
7629 /* If we get here, this operand was successfully parsed. */
7630 inst.base.operands[i].present = 1;
7631 continue;
7632
7633 failure:
7634 /* The parse routine should already have set the error, but in case
7635 not, set a default one here. */
7636 if (! error_p ())
7637 set_default_error ();
7638
7639 if (! backtrack_pos)
7640 goto parse_operands_return;
7641
7642 {
7643 /* We reach here because this operand is marked as optional, and
7644 either no operand was supplied or the operand was supplied but it
7645 was syntactically incorrect. In the latter case we report an
7646 error. In the former case we perform a few more checks before
7647 dropping through to the code to insert the default operand. */
7648
7649 char *tmp = backtrack_pos;
7650 char endchar = END_OF_INSN;
7651
7652 if (i != (aarch64_num_of_operands (opcode) - 1))
7653 endchar = ',';
7654 skip_past_char (&tmp, ',');
7655
7656 if (*tmp != endchar)
7657 /* The user has supplied an operand in the wrong format. */
7658 goto parse_operands_return;
7659
7660 /* Make sure there is not a comma before the optional operand.
7661 For example the fifth operand of 'sys' is optional:
7662
7663 sys #0,c0,c0,#0, <--- wrong
7664 sys #0,c0,c0,#0 <--- correct. */
7665 if (comma_skipped_p && i && endchar == END_OF_INSN)
7666 {
7667 set_fatal_syntax_error
7668 (_("unexpected comma before the omitted optional operand"));
7669 goto parse_operands_return;
7670 }
7671 }
7672
7673 /* Reaching here means we are dealing with an optional operand that is
7674 omitted from the assembly line. */
7675 gas_assert (optional_operand_p (opcode, i));
7676 info->present = 0;
7677 process_omitted_operand (operands[i], opcode, i, info);
7678
7679 /* Try again, skipping the optional operand at backtrack_pos. */
7680 str = backtrack_pos;
7681 backtrack_pos = 0;
7682
7683 /* Clear any error record after the omitted optional operand has been
7684 successfully handled. */
7685 clear_error ();
7686 }
7687
7688 /* Check if we have parsed all the operands. */
7689 if (*str != '\0' && ! error_p ())
7690 {
7691 /* Set I to the index of the last present operand; this is
7692 for the purpose of diagnostics. */
7693 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7694 ;
7695 set_fatal_syntax_error
7696 (_("unexpected characters following instruction"));
7697 }
7698
7699 parse_operands_return:
7700
7701 if (error_p ())
7702 {
7703 DEBUG_TRACE ("parsing FAIL: %s - %s",
7704 operand_mismatch_kind_names[get_error_kind ()],
7705 get_error_message ());
7706 /* Record the operand error properly; this is useful when there
7707 are multiple instruction templates for a mnemonic name, so that
7708 later on, we can select the error that most closely describes
7709 the problem. */
7710 record_operand_error (opcode, i, get_error_kind (),
7711 get_error_message ());
7712 return false;
7713 }
7714 else
7715 {
7716 DEBUG_TRACE ("parsing SUCCESS");
7717 return true;
7718 }
7719 }
7720
7721 /* Perform fix-ups that provide programmer-friendly features while
7722    keeping libopcodes happy, i.e. libopcodes only accepts
7723    the preferred architectural syntax.
7724    Return FALSE if there is any failure; otherwise return TRUE. */
7725
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Each fix-up below is keyed off the instruction class of the opcode
     that matched during parsing; classes with no convenience syntax
     fall through to the default and are left untouched.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the testable bit to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Encode as the architectural X form; the W spelling is only a
	     programmer convenience.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the X destination.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7828
7829 /* Check for loads and stores that will cause unpredictable behavior. */
7830
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pairs the address operand is operand 2; both transfer
	 registers are checked against the base.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes loads from stores here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	   == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 set indicates the pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /*  Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /*  Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /*  Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7924
7925 static void
7926 force_automatic_sequence_close (void)
7927 {
7928 struct aarch64_segment_info_type *tc_seg_info;
7929
7930 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7931 if (tc_seg_info->insn_sequence.instr)
7932 {
7933 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7934 _("previous `%s' sequence has not been closed"),
7935 tc_seg_info->insn_sequence.instr->opcode->name);
7936 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7937 }
7938 }
7939
7940 /* A wrapper function to interface with libopcodes on encoding and
7941 record the error message if there is any.
7942
7943 Return TRUE on success; otherwise return FALSE. */
7944
7945 static bool
7946 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7947 aarch64_insn *code)
7948 {
7949 aarch64_operand_error error_info;
7950 memset (&error_info, '\0', sizeof (error_info));
7951 error_info.kind = AARCH64_OPDE_NIL;
7952 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7953 && !error_info.non_fatal)
7954 return true;
7955
7956 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7957 record_operand_error_info (opcode, &error_info);
7958 return error_info.non_fatal;
7959 }
7960
7961 #ifdef DEBUG_AARCH64
7962 static inline void
7963 dump_opcode_operands (const aarch64_opcode *opcode)
7964 {
7965 int i = 0;
7966 while (opcode->operands[i] != AARCH64_OPND_NIL)
7967 {
7968 aarch64_verbose ("\t\t opnd%d: %s", i,
7969 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7970 ? aarch64_get_operand_name (opcode->operands[i])
7971 : aarch64_get_operand_desc (opcode->operands[i]));
7972 ++i;
7973 }
7974 }
7975 #endif /* DEBUG_AARCH64 */
7976
7977 /* This is the guts of the machine-dependent assembler. STR points to a
7978 machine dependent instruction. This function is supposed to emit
7979 the frags/bytes it assembles to. */
7980
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  DOT remembers the first '.' so that a
     condition suffix (e.g. "b.eq") can be split off later.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  /* An empty mnemonic is an error.  */
  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset of
     the global instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; any of the three stages may record an
	 operand error and make us try the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      template = template->next;
      if (template != NULL)
	{
	  /* Start the next template from a clean slate, again keeping
	     the parsed condition.  */
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8132
8133 /* Various frobbings of labels and their addresses. */
8134
void
aarch64_start_line_hook (void)
{
  /* A new statement invalidates any label remembered for alignment by
     md_assemble.  */
  last_label_seen = NULL;
}
8140
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can re-anchor it to the frag of
     the next emitted instruction.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8148
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8155
8156 int
8157 aarch64_data_in_code (void)
8158 {
8159 if (startswith (input_line_pointer + 1, "data:"))
8160 {
8161 *input_line_pointer = '/';
8162 input_line_pointer += 5;
8163 *input_line_pointer = 0;
8164 return 1;
8165 }
8166
8167 return 0;
8168 }
8169
/* Canonicalize NAME by stripping a trailing "/data" marker (as attached
   by aarch64_data_in_code) in place.  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t rather than int: strlen returns size_t, and the previous
     int truncation was (theoretically) unsafe for very long names.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8180 \f
8181 /* Table of all register names defined by default. The user can
8182 define additional names with .req. Note that all register names
8183 should appear in both upper and lowercase variants. Some registers
8184 also have mixed-case names. */
8185
8186 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8187 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8188 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8189 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8190 #define REGSET16(p,t) \
8191 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8192 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8193 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8194 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8195 #define REGSET16S(p,s,t) \
8196 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8197 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8198 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8199 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8200 #define REGSET31(p,t) \
8201 REGSET16(p, t), \
8202 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8203 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8204 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8205 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8206 #define REGSET(p,t) \
8207 REGSET31(p,t), REGNUM(p,31,t)
8208
8209 /* These go into aarch64_reg_hsh hash-table. */
8210 static const reg_entry reg_names[] = {
8211 /* Integer registers. */
8212 REGSET31 (x, R_64), REGSET31 (X, R_64),
8213 REGSET31 (w, R_32), REGSET31 (W, R_32),
8214
8215 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8216 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8217 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8218 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8219 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8220 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8221
8222 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8223 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8224
8225 /* Floating-point single precision registers. */
8226 REGSET (s, FP_S), REGSET (S, FP_S),
8227
8228 /* Floating-point double precision registers. */
8229 REGSET (d, FP_D), REGSET (D, FP_D),
8230
8231 /* Floating-point half precision registers. */
8232 REGSET (h, FP_H), REGSET (H, FP_H),
8233
8234 /* Floating-point byte precision registers. */
8235 REGSET (b, FP_B), REGSET (B, FP_B),
8236
8237 /* Floating-point quad precision registers. */
8238 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8239
8240 /* FP/SIMD registers. */
8241 REGSET (v, VN), REGSET (V, VN),
8242
8243 /* SVE vector registers. */
8244 REGSET (z, ZN), REGSET (Z, ZN),
8245
8246 /* SVE predicate registers. */
8247 REGSET16 (p, PN), REGSET16 (P, PN),
8248
8249 /* SME ZA tile registers. */
8250 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8251
8252 /* SME ZA tile registers (horizontal slice). */
8253 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8254
8255 /* SME ZA tile registers (vertical slice). */
8256 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8257 };
8258
8259 #undef REGDEF
8260 #undef REGDEF_ALIAS
8261 #undef REGNUM
8262 #undef REGSET16
8263 #undef REGSET31
8264 #undef REGSET
8265
/* Single-letter helpers: an upper-case letter means the corresponding
   condition flag is set (1), lower-case means clear (0).  B packs the
   four flags into the 4-bit NZCV immediate, N being the most
   significant bit.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the NZCV flags operand; the mixed-case name
   itself encodes which flags are set.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8303 \f
8304 /* MD interface: bits in the object file. */
8305
8306 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8307 for use in the a.out file, and stores them in the array pointed to by buf.
8308 This knows about the endian-ness of the target machine and does
8309 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8310 2 (short) and 4 (long) Floating numbers are put out as a series of
8311 LITTLENUMS (shorts, here at least). */
8312
8313 void
8314 md_number_to_chars (char *buf, valueT val, int n)
8315 {
8316 if (target_big_endian)
8317 number_to_chars_bigendian (buf, val, n);
8318 else
8319 number_to_chars_littleendian (buf, val, n);
8320 }
8321
8322 /* MD interface: Sections. */
8323
8324 /* Estimate the size of a frag before relaxing. Assume everything fits in
8325 4 bytes. */
8326
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Assume a fixed 4-byte size for both the variable part and the
     estimate.  */
  fragp->fr_var = 4;
  return 4;
}
8333
8334 /* Round up a section size to the appropriate boundary. */
8335
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding: sections keep their natural size.  */
  return size;
}
8341
8342 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8343 of an rs_align_code fragment.
8344
8345 Here we fill the frag with the appropriate info for padding the
8346 output stream. The resulting frag will consist of a fixed (fr_fix)
8347 and of a repeating (fr_var) part.
8348
8349 The fixed content is always emitted before the repeating content and
8350 these two parts are used as follows in constructing the output:
8351 - the fixed part will be used to align to a valid instruction word
8352 boundary, in case that we start at a misaligned address; as no
8353 executable instruction can live at the misaligned location, we
8354 simply fill with zeros;
8355 - the variable part will be used to cover the remaining padding and
8356 we fill using the AArch64 NOP instruction.
8357
8358 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8359 enough storage space for up to 3 bytes for padding the back to a valid
8360 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8361
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must produce.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* FIX is the number of bytes needed to reach 4-byte instruction
     alignment; these are zero-filled (no instruction can live at a
     misaligned address).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern to cover the rest.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8399
8400 /* Perform target specific initialisation of a frag.
8401 Note - despite the name this initialisation is not done when the frag
8402 is created, but only when its type is assigned. A frag can be created
8403 and used a long time before its type is set, so beware of assuming that
8404 this initialisation is performed first. */
8405
8406 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* No target-specific frag initialisation is needed for non-ELF
     output.  */
}
8412
8413 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state according to the kind of frag.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8445
8446 /* Whether SFrame unwind info is supported. */
8447
bool
aarch64_support_sframe_p (void)
{
  /* At this time, SFrame is supported for aarch64 only, and only when
     targeting the LP64 ABI.  */
  return (aarch64_abi == AARCH64_ABI_LP64);
}
8454
8455 /* Specify if RA tracking is needed. */
8456
bool
aarch64_sframe_ra_tracking_p (void)
{
  /* Return-address tracking is always enabled for aarch64.  */
  return true;
}
8462
8463 /* Specify the fixed offset to recover RA from CFA.
8464 (useful only when RA tracking is not needed). */
8465
offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* Not applicable here since RA tracking is always used on aarch64;
     report the invalid marker.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8471
8472 /* Get the abi/arch indentifier for SFrame. */
8473
8474 unsigned char
8475 aarch64_sframe_get_abi_arch (void)
8476 {
8477 unsigned char sframe_abi_arch = 0;
8478
8479 if (aarch64_support_sframe_p ())
8480 {
8481 sframe_abi_arch = target_big_endian
8482 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8483 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8484 }
8485
8486 return sframe_abi_arch;
8487 }
8488
8489 #endif /* OBJ_ELF */
8490 \f
8491 /* Initialize the DWARF-2 unwind information for this procedure. */
8492
void
tc_aarch64_frame_initial_instructions (void)
{
  /* At function entry the CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8498
8499 /* Convert REGNAME to a DWARF-2 register number. */
8500
int
tc_aarch64_regname_to_dw2regnum (char *regname)
{
  const reg_entry *reg = parse_reg (&regname);
  if (reg == NULL)
    return -1;

  switch (reg->type)
    {
    /* Core integer registers and SP map directly to their number.  */
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      return reg->number;

    /* FP/SIMD scalar registers are offset by 64 in the DWARF
       numbering.  */
    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      return reg->number + 64;

    default:
      break;
    }
  /* Any other register type is not mapped here.  */
  return -1;
}
8528
8529 /* Implement DWARF2_ADDR_SIZE. */
8530
8531 int
8532 aarch64_dwarf2_addr_size (void)
8533 {
8534 if (ilp32_p)
8535 return 4;
8536 else if (llp64_p)
8537 return 8;
8538 return bfd_arch_bits_per_address (stdoutput) / 8;
8539 }
8540
8541 /* MD interface: Symbol and relocation handling. */
8542
8543 /* Return the address within the segment that a PC-relative fixup is
8544 relative to. For AArch64 PC-relative fixups applied to instructions
8545 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8546
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8563
8564 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8565 Otherwise we have no need to default values of symbols. */
8566
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, at most once.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8589
8590 /* Return non-zero if the indicated VALUE has overflowed the maximum
8591 range expressible by a unsigned number with the indicated number of
8592 BITS. */
8593
8594 static bool
8595 unsigned_overflow (valueT value, unsigned bits)
8596 {
8597 valueT lim;
8598 if (bits >= sizeof (valueT) * 8)
8599 return false;
8600 lim = (valueT) 1 << bits;
8601 return (value >= lim);
8602 }
8603
8604
8605 /* Return non-zero if the indicated VALUE has overflowed the maximum
8606 range expressible by an signed number with the indicated number of
8607 BITS. */
8608
8609 static bool
8610 signed_overflow (offsetT value, unsigned bits)
8611 {
8612 offsetT lim;
8613 if (bits >= sizeof (offsetT) * 8)
8614 return false;
8615 lim = (offsetT) 1 << (bits - 1);
8616 return (value < -lim || value >= lim);
8617 }
8618
8619 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8620 unsigned immediate offset load/store instruction, try to encode it as
8621 an unscaled, 9-bit, signed immediate offset load/store instruction.
8622 Return TRUE if it is successful; otherwise return FALSE.
8623
8624 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8625 in response to the standard LDR/STR mnemonics when the immediate offset is
8626 unambiguous, i.e. when it is negative or unaligned. */
8627
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled load/store opcode to its unscaled (LDUR/STUR)
     counterpart; OP_NIL marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the operands do
     not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8681
8682 /* Called by fix_insn to fix a MOV immediate alias instruction.
8683
8684 Operand for a generic move immediate instruction, which is an alias
8685 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8686 a 32-bit/64-bit immediate value into general register. An assembler error
8687 shall result if the immediate cannot be created by a single one of these
8688 instructions. If there is a choice, then to ensure reversability an
8689 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8690
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  (The comment previously said MOVK, but
	 OP_MOV_IMM_WIDEN is the inverted-immediate MOVN form; see the
	 MOVZ/MOVN/ORR preference order described above.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction aliases can materialise VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8742
8743 /* An instruction operand which is immediate related may have symbol used
8744 in the assembly, e.g.
8745
8746 mov w0, u32
8747 .set u32, 0x00ffff00
8748
8749 At the time when the assembly instruction is parsed, a referenced symbol,
8750 like 'u32' in the above example may not have been seen; a fixS is created
8751 in such a case and is handled here after symbols have been resolved.
8752 Instruction is fixed up with VALUE using the information in *FIXP plus
8753 extra information in FLAGS.
8754
8755 This function is called by md_apply_fix to fix up instructions that need
8756 a fix-up described above but does not involve any linker-time relocation. */
8757
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  /* Which operand of the instruction the fixup applies to.  */
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  /* Optional copy of the parsed instruction, used when re-encoding is
     needed rather than direct bit patching.  May be NULL.  */
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate field of SVC/HVC/SMC/UDF etc.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  These cannot be patched bit-by-bit; re-run
	 the full operand encoder on the saved instruction copy.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled offset did not fit; the unscaled (LDUR-style) form
	     may still be able to represent it.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8921
8922 /* Apply a fixup (fixP) to segment data, once it has been determined
8923 by our caller that we have all the info we need to fix it up.
8924
8925 Parameter valP is the pointer to the value of the bits. */
8926
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the instruction/data bytes being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Backend-specific fixup flags, e.g. FIXUP_F_HAS_EXPLICIT_SHIFT.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      /* Patch the immediate/offset into the instruction.  */
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW family: SCALE selects which 16-bit group of the value goes
       into the movz/movn/movk immediate field.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32-bit or 64-bit variant
	 depending on the selected ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the struct aarch64_inst attached to this fixup, if any.
     N.B. currently only a very limited number of fix-up types actually
     use this field, so the impact on performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9313
9314 /* Translate internal representation of relocation info to BFD target
9315 format. */
9316
9317 arelent *
9318 tc_gen_reloc (asection * section, fixS * fixp)
9319 {
9320 arelent *reloc;
9321 bfd_reloc_code_real_type code;
9322
9323 reloc = XNEW (arelent);
9324
9325 reloc->sym_ptr_ptr = XNEW (asymbol *);
9326 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9327 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9328
9329 if (fixp->fx_pcrel)
9330 {
9331 if (section->use_rela_p)
9332 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9333 else
9334 fixp->fx_offset = reloc->address;
9335 }
9336 reloc->addend = fixp->fx_offset;
9337
9338 code = fixp->fx_r_type;
9339 switch (code)
9340 {
9341 case BFD_RELOC_16:
9342 if (fixp->fx_pcrel)
9343 code = BFD_RELOC_16_PCREL;
9344 break;
9345
9346 case BFD_RELOC_32:
9347 if (fixp->fx_pcrel)
9348 code = BFD_RELOC_32_PCREL;
9349 break;
9350
9351 case BFD_RELOC_64:
9352 if (fixp->fx_pcrel)
9353 code = BFD_RELOC_64_PCREL;
9354 break;
9355
9356 default:
9357 break;
9358 }
9359
9360 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9361 if (reloc->howto == NULL)
9362 {
9363 as_bad_where (fixp->fx_file, fixp->fx_line,
9364 _
9365 ("cannot represent %s relocation in this object file format"),
9366 bfd_get_reloc_code_name (code));
9367 return NULL;
9368 }
9369
9370 return reloc;
9371 }
9372
9373 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9374
9375 void
9376 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9377 {
9378 bfd_reloc_code_real_type type;
9379 int pcrel = 0;
9380
9381 #ifdef TE_PE
9382 if (exp->X_op == O_secrel)
9383 {
9384 exp->X_op = O_symbol;
9385 type = BFD_RELOC_32_SECREL;
9386 }
9387 else
9388 {
9389 #endif
9390 /* Pick a reloc.
9391 FIXME: @@ Should look at CPU word size. */
9392 switch (size)
9393 {
9394 case 1:
9395 type = BFD_RELOC_8;
9396 break;
9397 case 2:
9398 type = BFD_RELOC_16;
9399 break;
9400 case 4:
9401 type = BFD_RELOC_32;
9402 break;
9403 case 8:
9404 type = BFD_RELOC_64;
9405 break;
9406 default:
9407 as_bad (_("cannot do %u-byte relocation"), size);
9408 type = BFD_RELOC_UNUSED;
9409 break;
9410 }
9411 #ifdef TE_PE
9412 }
9413 #endif
9414
9415 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9416 }
9417
9418 /* Implement md_after_parse_args. This is the earliest time we need to decide
9419 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9420
9421 void
9422 aarch64_after_parse_args (void)
9423 {
9424 if (aarch64_abi != AARCH64_ABI_NONE)
9425 return;
9426
9427 #ifdef OBJ_ELF
9428 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9429 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9430 aarch64_abi = AARCH64_ABI_ILP32;
9431 else
9432 aarch64_abi = AARCH64_ABI_LP64;
9433 #else
9434 aarch64_abi = AARCH64_ABI_LLP64;
9435 #endif
9436 }
9437
9438 #ifdef OBJ_ELF
9439 const char *
9440 elf64_aarch64_target_format (void)
9441 {
9442 #ifdef TE_CLOUDABI
9443 /* FIXME: What to do for ilp32_p ? */
9444 if (target_big_endian)
9445 return "elf64-bigaarch64-cloudabi";
9446 else
9447 return "elf64-littleaarch64-cloudabi";
9448 #else
9449 if (target_big_endian)
9450 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9451 else
9452 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9453 #endif
9454 }
9455
/* Target hook run on each symbol at write-out time; simply defers to the
   generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9461 #elif defined OBJ_COFF
/* Return the BFD target name used for COFF/PE output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9467 #endif
9468
9469 /* MD interface: Finalization. */
9470
9471 /* A good place to do this, although this was probably not intended
9472 for this kind of use. We need to dump the literal pool before
9473 references are made to a null symbol pointer. */
9474
9475 void
9476 aarch64_cleanup (void)
9477 {
9478 literal_pool *pool;
9479
9480 for (pool = list_of_pools; pool; pool = pool->next)
9481 {
9482 /* Put it at the end of the relevant section. */
9483 subseg_set (pool->section, pool->sub_section);
9484 s_ltorg (0);
9485 }
9486 }
9487
9488 #ifdef OBJ_ELF
9489 /* Remove any excess mapping symbols generated for alignment frags in
9490 SEC. We may have created a mapping symbol before a zero byte
9491 alignment; remove it if there's a mapping symbol after the
9492 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each frag.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The mapping symbol sits exactly on the boundary with the next
	 frag; decide whether a following mapping symbol makes it
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9552 #endif
9553
9554 /* Adjust the symbol table. */
9555
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9566
/* Insert KEY/VALUE into TABLE.  The REPLACE argument of str_hash_insert
   is 0, so presumably an already-present KEY is left untouched —
   see str_hash_insert for the exact duplicate-key behaviour.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9572
/* As checked_hash_insert, but for system-register tables: first assert
   that KEY fits within AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9579
9580 static void
9581 fill_instruction_hash_table (void)
9582 {
9583 const aarch64_opcode *opcode = aarch64_opcode_table;
9584
9585 while (opcode->name != NULL)
9586 {
9587 templates *templ, *new_templ;
9588 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9589
9590 new_templ = XNEW (templates);
9591 new_templ->opcode = opcode;
9592 new_templ->next = NULL;
9593
9594 if (!templ)
9595 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9596 else
9597 {
9598 new_templ->next = templ->next;
9599 templ->next = new_templ;
9600 }
9601 ++opcode;
9602 }
9603 }
9604
9605 static inline void
9606 convert_to_upper (char *dst, const char *src, size_t num)
9607 {
9608 unsigned int i;
9609 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9610 *dst = TOUPPER (*src);
9611 *dst = '\0';
9612 }
9613
9614 /* Assume STR point to a lower-case string, allocate, convert and return
9615 the corresponding upper-case string. */
9616 static inline const char*
9617 get_upper_str (const char *str)
9618 {
9619 char *ret;
9620 size_t len = strlen (str);
9621 ret = XNEWVEC (char, len + 1);
9622 convert_to_upper (ret, str, len);
9623 return ret;
9624 }
9625
9626 /* MD interface: Initialization. */
9627
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used while parsing: mnemonics, condition
     codes, shift/extend modifiers, system registers, register names,
     barrier/prefetch/hint options etc.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* Hash each NULL-name-terminated system register/operation table.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  -mcpu wins
     over -march; fall back to the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9797
/* Command line processing.  */

/* Short options: only -m<arg>, which is dispatched to the long-option
   tables below by md_parse_option.  */
const char *md_shortopts = "m:";

/* -EB/-EL select big- or little-endian output.  Only define the codes
   that make sense for this build: both for a bi-endian target, otherwise
   just the one matching the configured default byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9824
/* A simple on/off command-line option: when OPTION matches, *VAR is set
   to VALUE.  Used for the aarch64_opts table below.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9833
/* Simple on/off options recognized by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9847
/* An entry in the CPU table: the -mcpu=/.cpu name and the feature set
   it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9856
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry pairs a base architecture with the
   optional extensions the part implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  /* "all" enables every feature; the .cpu directive handler skips it.  */
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
10016
/* An entry in the architecture table: the -march=/.arch name and the
   feature set it enables.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};
10022
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  /* "all" enables every feature; the .arch directive handler skips it.  */
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10043
/* ISA extensions.  An entry maps an extension name (as written after
   '+' in -mcpu/-march or in .arch_extension) to the feature bits it
   controls and the features it depends on.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};
10051
/* The extension table.  The REQUIRE sets are used both to pull in
   prerequisites when an extension is enabled and to drop dependents
   when one is disabled (see aarch64_feature_enable_set and
   aarch64_feature_disable_set).  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP
		    | AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD
		    | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_ARCH_NONE},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_ARCH_NONE},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_ARCH_NONE},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_ARCH_NONE},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_ARCH_NONE},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_ARCH_NONE},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
   AARCH64_ARCH_NONE},
  {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
   AARCH64_ARCH_NONE},
  {"cssc", AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10155
/* A long option of the form -m<option><argument>: OPTION is matched as
   a prefix and the remainder of the argument is passed to FUNC.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10163
10164 /* Transitive closure of features depending on set. */
10165 static aarch64_feature_set
10166 aarch64_feature_disable_set (aarch64_feature_set set)
10167 {
10168 const struct aarch64_option_cpu_value_table *opt;
10169 aarch64_feature_set prev = 0;
10170
10171 while (prev != set) {
10172 prev = set;
10173 for (opt = aarch64_features; opt->name != NULL; opt++)
10174 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10175 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10176 }
10177 return set;
10178 }
10179
10180 /* Transitive closure of dependencies of set. */
10181 static aarch64_feature_set
10182 aarch64_feature_enable_set (aarch64_feature_set set)
10183 {
10184 const struct aarch64_option_cpu_value_table *opt;
10185 aarch64_feature_set prev = 0;
10186
10187 while (prev != set) {
10188 prev = set;
10189 for (opt = aarch64_features; opt->name != NULL; opt++)
10190 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10191 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10192 }
10193 return set;
10194 }
10195
10196 static int
10197 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10198 bool ext_only)
10199 {
10200 /* We insist on extensions being added before being removed. We achieve
10201 this by using the ADDING_VALUE variable to indicate whether we are
10202 adding an extension (1) or removing it (0) and only allowing it to
10203 change in the order -1 -> 1 -> 0. */
10204 int adding_value = -1;
10205 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10206
10207 /* Copy the feature set, so that we can modify it. */
10208 *ext_set = **opt_p;
10209 *opt_p = ext_set;
10210
10211 while (str != NULL && *str != 0)
10212 {
10213 const struct aarch64_option_cpu_value_table *opt;
10214 const char *ext = NULL;
10215 int optlen;
10216
10217 if (!ext_only)
10218 {
10219 if (*str != '+')
10220 {
10221 as_bad (_("invalid architectural extension"));
10222 return 0;
10223 }
10224
10225 ext = strchr (++str, '+');
10226 }
10227
10228 if (ext != NULL)
10229 optlen = ext - str;
10230 else
10231 optlen = strlen (str);
10232
10233 if (optlen >= 2 && startswith (str, "no"))
10234 {
10235 if (adding_value != 0)
10236 adding_value = 0;
10237 optlen -= 2;
10238 str += 2;
10239 }
10240 else if (optlen > 0)
10241 {
10242 if (adding_value == -1)
10243 adding_value = 1;
10244 else if (adding_value != 1)
10245 {
10246 as_bad (_("must specify extensions to add before specifying "
10247 "those to remove"));
10248 return false;
10249 }
10250 }
10251
10252 if (optlen == 0)
10253 {
10254 as_bad (_("missing architectural extension"));
10255 return 0;
10256 }
10257
10258 gas_assert (adding_value != -1);
10259
10260 for (opt = aarch64_features; opt->name != NULL; opt++)
10261 if (strncmp (opt->name, str, optlen) == 0)
10262 {
10263 aarch64_feature_set set;
10264
10265 /* Add or remove the extension. */
10266 if (adding_value)
10267 {
10268 set = aarch64_feature_enable_set (opt->value);
10269 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10270 }
10271 else
10272 {
10273 set = aarch64_feature_disable_set (opt->value);
10274 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10275 }
10276 break;
10277 }
10278
10279 if (opt->name == NULL)
10280 {
10281 as_bad (_("unknown architectural extension `%s'"), str);
10282 return 0;
10283 }
10284
10285 str = ext;
10286 };
10287
10288 return 1;
10289 }
10290
10291 static int
10292 aarch64_parse_cpu (const char *str)
10293 {
10294 const struct aarch64_cpu_option_table *opt;
10295 const char *ext = strchr (str, '+');
10296 size_t optlen;
10297
10298 if (ext != NULL)
10299 optlen = ext - str;
10300 else
10301 optlen = strlen (str);
10302
10303 if (optlen == 0)
10304 {
10305 as_bad (_("missing cpu name `%s'"), str);
10306 return 0;
10307 }
10308
10309 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10310 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10311 {
10312 mcpu_cpu_opt = &opt->value;
10313 if (ext != NULL)
10314 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10315
10316 return 1;
10317 }
10318
10319 as_bad (_("unknown cpu `%s'"), str);
10320 return 0;
10321 }
10322
10323 static int
10324 aarch64_parse_arch (const char *str)
10325 {
10326 const struct aarch64_arch_option_table *opt;
10327 const char *ext = strchr (str, '+');
10328 size_t optlen;
10329
10330 if (ext != NULL)
10331 optlen = ext - str;
10332 else
10333 optlen = strlen (str);
10334
10335 if (optlen == 0)
10336 {
10337 as_bad (_("missing architecture name `%s'"), str);
10338 return 0;
10339 }
10340
10341 for (opt = aarch64_archs; opt->name != NULL; opt++)
10342 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10343 {
10344 march_cpu_opt = &opt->value;
10345 if (ext != NULL)
10346 return aarch64_parse_features (ext, &march_cpu_opt, false);
10347
10348 return 1;
10349 }
10350
10351 as_bad (_("unknown architecture `%s'\n"), str);
10352 return 0;
10353 }
10354
/* ABIs.  Maps an -mabi= name to the corresponding ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};
10361
/* ABI names recognized by -mabi=, keyed by object format: ELF targets
   support ILP32 and LP64; other targets use LLP64.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10370
10371 static int
10372 aarch64_parse_abi (const char *str)
10373 {
10374 unsigned int i;
10375
10376 if (str[0] == '\0')
10377 {
10378 as_bad (_("missing abi name `%s'"), str);
10379 return 0;
10380 }
10381
10382 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10383 if (strcmp (str, aarch64_abis[i].name) == 0)
10384 {
10385 aarch64_abi = aarch64_abis[i].value;
10386 return 1;
10387 }
10388
10389 as_bad (_("unknown abi `%s'\n"), str);
10390 return 0;
10391 }
10392
/* Options taking a sub-argument, e.g. -mabi=lp64.  The trailing '=' is
   part of the matched prefix; FUNC receives the text that follows it.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10402
/* Handle one command-line option.  C is the option character (for long
   options, the character after the leading dash) and ARG its argument,
   if any.  Return 1 if the option was consumed, 0 if it is unknown.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple on/off options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the prefix-matched options with a sub-argument.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10469
/* Print the AArch64-specific assembler options to FP for --help.  */

void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple on/off options.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Options taking a sub-argument.  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
10496
10497 /* Parse a .cpu directive. */
10498
10499 static void
10500 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10501 {
10502 const struct aarch64_cpu_option_table *opt;
10503 char saved_char;
10504 char *name;
10505 char *ext;
10506 size_t optlen;
10507
10508 name = input_line_pointer;
10509 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10510 saved_char = *input_line_pointer;
10511 *input_line_pointer = 0;
10512
10513 ext = strchr (name, '+');
10514
10515 if (ext != NULL)
10516 optlen = ext - name;
10517 else
10518 optlen = strlen (name);
10519
10520 /* Skip the first "all" entry. */
10521 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10522 if (strlen (opt->name) == optlen
10523 && strncmp (name, opt->name, optlen) == 0)
10524 {
10525 mcpu_cpu_opt = &opt->value;
10526 if (ext != NULL)
10527 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10528 return;
10529
10530 cpu_variant = *mcpu_cpu_opt;
10531
10532 *input_line_pointer = saved_char;
10533 demand_empty_rest_of_line ();
10534 return;
10535 }
10536 as_bad (_("unknown cpu `%s'"), name);
10537 *input_line_pointer = saved_char;
10538 ignore_rest_of_line ();
10539 }
10540
10541
10542 /* Parse a .arch directive. */
10543
10544 static void
10545 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10546 {
10547 const struct aarch64_arch_option_table *opt;
10548 char saved_char;
10549 char *name;
10550 char *ext;
10551 size_t optlen;
10552
10553 name = input_line_pointer;
10554 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10555 saved_char = *input_line_pointer;
10556 *input_line_pointer = 0;
10557
10558 ext = strchr (name, '+');
10559
10560 if (ext != NULL)
10561 optlen = ext - name;
10562 else
10563 optlen = strlen (name);
10564
10565 /* Skip the first "all" entry. */
10566 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10567 if (strlen (opt->name) == optlen
10568 && strncmp (name, opt->name, optlen) == 0)
10569 {
10570 mcpu_cpu_opt = &opt->value;
10571 if (ext != NULL)
10572 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10573 return;
10574
10575 cpu_variant = *mcpu_cpu_opt;
10576
10577 *input_line_pointer = saved_char;
10578 demand_empty_rest_of_line ();
10579 return;
10580 }
10581
10582 as_bad (_("unknown architecture `%s'\n"), name);
10583 *input_line_pointer = saved_char;
10584 ignore_rest_of_line ();
10585 }
10586
10587 /* Parse a .arch_extension directive. */
10588
10589 static void
10590 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10591 {
10592 char saved_char;
10593 char *ext = input_line_pointer;
10594
10595 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10596 saved_char = *input_line_pointer;
10597 *input_line_pointer = 0;
10598
10599 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10600 return;
10601
10602 cpu_variant = *mcpu_cpu_opt;
10603
10604 *input_line_pointer = saved_char;
10605 demand_empty_rest_of_line ();
10606 }
10607
/* Copy symbol information.  */

/* Mirror the target-specific symbol flag bits of SRC onto DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10615
#ifdef OBJ_ELF
/* Same as elf_copy_symbol_attributes, but without copying st_other.
   This is needed so AArch64 specific st_other values can be independently
   specified for an IFUNC resolver (that is called by the dynamic linker)
   and the symbol it resolves (aliased to the resolver).  In particular,
   if a function symbol has special st_other value set via directives,
   then attaching an IFUNC resolver to that symbol should not override
   the st_other setting.  Requiring the directive on the IFUNC resolver
   symbol would be unexpected and problematic in C code, where the two
   symbols appear as two independent function declarations.  */

void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *src_obj = symbol_get_obj (src);
  struct elf_obj_sy *dest_obj = symbol_get_obj (dest);

  /* If size is unset, copy size from SRC.  Because we don't track whether
     .size has been used, we can't differentiate .size dest, 0 from the case
     where dest's size is unset.  */
  if (dest_obj->size != NULL || S_GET_SIZE (dest) != 0)
    return;

  if (src_obj->size != NULL)
    {
      dest_obj->size = XNEW (expressionS);
      *dest_obj->size = *src_obj->size;
    }
  S_SET_SIZE (dest, S_GET_SIZE (src));
}
#endif