[AArch64][SVE 27/32] Add SVE integer immediate operands
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum vector_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q,
87 NT_zero,
88 NT_merge
89 };
90
91 /* Bits for DEFINED field in vector_type_el. */
92 #define NTA_HASTYPE 1
93 #define NTA_HASINDEX 2
94 #define NTA_HASVARWIDTH 4
95
96 struct vector_type_el
97 {
98 enum vector_el_type type;
99 unsigned char defined;
100 unsigned width;
101 int64_t index;
102 };
103
104 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
105
106 struct reloc
107 {
108 bfd_reloc_code_real_type type;
109 expressionS exp;
110 int pc_rel;
111 enum aarch64_opnd opnd;
112 uint32_t flags;
113 unsigned need_libopcodes_p : 1;
114 };
115
116 struct aarch64_instruction
117 {
118 /* libopcodes structure for instruction intermediate representation. */
119 aarch64_inst base;
120 /* Record assembly errors found during the parsing. */
121 struct
122 {
123 enum aarch64_operand_error_kind kind;
124 const char *error;
125 } parsing_error;
126 /* The condition that appears in the assembly line. */
127 int cond;
128 /* Relocation information (including the GAS internal fixup). */
129 struct reloc reloc;
130 /* Need to generate an immediate in the literal pool. */
131 unsigned gen_lit_pool : 1;
132 };
133
134 typedef struct aarch64_instruction aarch64_instruction;
135
136 static aarch64_instruction inst;
137
138 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
139 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
141 /* Diagnostics inline function utilites.
142
143 These are lightweight utlities which should only be called by parse_operands
144 and other parsers. GAS processes each assembly line by parsing it against
145 instruction template(s), in the case of multiple templates (for the same
146 mnemonic name), those templates are tried one by one until one succeeds or
147 all fail. An assembly line may fail a few templates before being
148 successfully parsed; an error saved here in most cases is not a user error
149 but an error indicating the current template is not the right template.
150 Therefore it is very important that errors can be saved at a low cost during
151 the parsing; we don't want to slow down the whole parsing by recording
152 non-user errors in detail.
153
154 Remember that the objective is to help GAS pick up the most approapriate
155 error message in the case of multiple templates, e.g. FMOV which has 8
156 templates. */
157
158 static inline void
159 clear_error (void)
160 {
161 inst.parsing_error.kind = AARCH64_OPDE_NIL;
162 inst.parsing_error.error = NULL;
163 }
164
165 static inline bfd_boolean
166 error_p (void)
167 {
168 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
169 }
170
171 static inline const char *
172 get_error_message (void)
173 {
174 return inst.parsing_error.error;
175 }
176
177 static inline enum aarch64_operand_error_kind
178 get_error_kind (void)
179 {
180 return inst.parsing_error.kind;
181 }
182
183 static inline void
184 set_error (enum aarch64_operand_error_kind kind, const char *error)
185 {
186 inst.parsing_error.kind = kind;
187 inst.parsing_error.error = error;
188 }
189
190 static inline void
191 set_recoverable_error (const char *error)
192 {
193 set_error (AARCH64_OPDE_RECOVERABLE, error);
194 }
195
196 /* Use the DESC field of the corresponding aarch64_operand entry to compose
197 the error message. */
198 static inline void
199 set_default_error (void)
200 {
201 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
202 }
203
204 static inline void
205 set_syntax_error (const char *error)
206 {
207 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
208 }
209
210 static inline void
211 set_first_syntax_error (const char *error)
212 {
213 if (! error_p ())
214 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
215 }
216
217 static inline void
218 set_fatal_syntax_error (const char *error)
219 {
220 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
221 }
222 \f
223 /* Number of littlenums required to hold an extended precision number. */
224 #define MAX_LITTLENUMS 6
225
226 /* Return value for certain parsers when the parsing fails; those parsers
227 return the information of the parsed result, e.g. register number, on
228 success. */
229 #define PARSE_FAIL -1
230
231 /* This is an invalid condition code that means no conditional field is
232 present. */
233 #define COND_ALWAYS 0x10
234
235 typedef struct
236 {
237 const char *template;
238 unsigned long value;
239 } asm_barrier_opt;
240
241 typedef struct
242 {
243 const char *template;
244 uint32_t value;
245 } asm_nzcv;
246
247 struct reloc_entry
248 {
249 char *name;
250 bfd_reloc_code_real_type reloc;
251 };
252
253 /* Macros to define the register types and masks for the purpose
254 of parsing. */
255
256 #undef AARCH64_REG_TYPES
257 #define AARCH64_REG_TYPES \
258 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
259 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
260 BASIC_REG_TYPE(SP_32) /* wsp */ \
261 BASIC_REG_TYPE(SP_64) /* sp */ \
262 BASIC_REG_TYPE(Z_32) /* wzr */ \
263 BASIC_REG_TYPE(Z_64) /* xzr */ \
264 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
265 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
266 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
267 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
268 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
269 BASIC_REG_TYPE(CN) /* c[0-7] */ \
270 BASIC_REG_TYPE(VN) /* v[0-31] */ \
271 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
272 BASIC_REG_TYPE(PN) /* p[0-15] */ \
273 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
274 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
275 /* Typecheck: same, plus SVE registers. */ \
276 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
277 | REG_TYPE(ZN)) \
278 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
279 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
280 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
281 /* Typecheck: same, plus SVE registers. */ \
282 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
283 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
286 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
288 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
289 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
292 /* Typecheck: any [BHSDQ]P FP. */ \
293 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
294 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
295 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
298 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
299 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
300 /* Any integer register; used for error messages only. */ \
301 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
303 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
304 /* Pseudo type to mark the end of the enumerator sequence. */ \
305 BASIC_REG_TYPE(MAX)
306
307 #undef BASIC_REG_TYPE
308 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
309 #undef MULTI_REG_TYPE
310 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
311
312 /* Register type enumerators. */
313 typedef enum aarch64_reg_type_
314 {
315 /* A list of REG_TYPE_*. */
316 AARCH64_REG_TYPES
317 } aarch64_reg_type;
318
319 #undef BASIC_REG_TYPE
320 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
321 #undef REG_TYPE
322 #define REG_TYPE(T) (1 << REG_TYPE_##T)
323 #undef MULTI_REG_TYPE
324 #define MULTI_REG_TYPE(T,V) V,
325
326 /* Structure for a hash table entry for a register. */
327 typedef struct
328 {
329 const char *name;
330 unsigned char number;
331 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
332 unsigned char builtin;
333 } reg_entry;
334
335 /* Values indexed by aarch64_reg_type to assist the type checking. */
336 static const unsigned reg_type_masks[] =
337 {
338 AARCH64_REG_TYPES
339 };
340
341 #undef BASIC_REG_TYPE
342 #undef REG_TYPE
343 #undef MULTI_REG_TYPE
344 #undef AARCH64_REG_TYPES
345
346 /* Diagnostics used when we don't get a register of the expected type.
347 Note: this has to synchronized with aarch64_reg_type definitions
348 above. */
349 static const char *
350 get_reg_expected_msg (aarch64_reg_type reg_type)
351 {
352 const char *msg;
353
354 switch (reg_type)
355 {
356 case REG_TYPE_R_32:
357 msg = N_("integer 32-bit register expected");
358 break;
359 case REG_TYPE_R_64:
360 msg = N_("integer 64-bit register expected");
361 break;
362 case REG_TYPE_R_N:
363 msg = N_("integer register expected");
364 break;
365 case REG_TYPE_R64_SP:
366 msg = N_("64-bit integer or SP register expected");
367 break;
368 case REG_TYPE_SVE_BASE:
369 msg = N_("base register expected");
370 break;
371 case REG_TYPE_R_Z:
372 msg = N_("integer or zero register expected");
373 break;
374 case REG_TYPE_SVE_OFFSET:
375 msg = N_("offset register expected");
376 break;
377 case REG_TYPE_R_SP:
378 msg = N_("integer or SP register expected");
379 break;
380 case REG_TYPE_R_Z_SP:
381 msg = N_("integer, zero or SP register expected");
382 break;
383 case REG_TYPE_FP_B:
384 msg = N_("8-bit SIMD scalar register expected");
385 break;
386 case REG_TYPE_FP_H:
387 msg = N_("16-bit SIMD scalar or floating-point half precision "
388 "register expected");
389 break;
390 case REG_TYPE_FP_S:
391 msg = N_("32-bit SIMD scalar or floating-point single precision "
392 "register expected");
393 break;
394 case REG_TYPE_FP_D:
395 msg = N_("64-bit SIMD scalar or floating-point double precision "
396 "register expected");
397 break;
398 case REG_TYPE_FP_Q:
399 msg = N_("128-bit SIMD scalar or floating-point quad precision "
400 "register expected");
401 break;
402 case REG_TYPE_CN:
403 msg = N_("C0 - C15 expected");
404 break;
405 case REG_TYPE_R_Z_BHSDQ_V:
406 msg = N_("register expected");
407 break;
408 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
409 msg = N_("SIMD scalar or floating-point register expected");
410 break;
411 case REG_TYPE_VN: /* any V reg */
412 msg = N_("vector register expected");
413 break;
414 case REG_TYPE_ZN:
415 msg = N_("SVE vector register expected");
416 break;
417 case REG_TYPE_PN:
418 msg = N_("SVE predicate register expected");
419 break;
420 default:
421 as_fatal (_("invalid register type %d"), reg_type);
422 }
423 return msg;
424 }
425
426 /* Some well known registers that we refer to directly elsewhere. */
427 #define REG_SP 31
428
429 /* Instructions take 4 bytes in the object file. */
430 #define INSN_SIZE 4
431
432 static struct hash_control *aarch64_ops_hsh;
433 static struct hash_control *aarch64_cond_hsh;
434 static struct hash_control *aarch64_shift_hsh;
435 static struct hash_control *aarch64_sys_regs_hsh;
436 static struct hash_control *aarch64_pstatefield_hsh;
437 static struct hash_control *aarch64_sys_regs_ic_hsh;
438 static struct hash_control *aarch64_sys_regs_dc_hsh;
439 static struct hash_control *aarch64_sys_regs_at_hsh;
440 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
441 static struct hash_control *aarch64_reg_hsh;
442 static struct hash_control *aarch64_barrier_opt_hsh;
443 static struct hash_control *aarch64_nzcv_hsh;
444 static struct hash_control *aarch64_pldop_hsh;
445 static struct hash_control *aarch64_hint_opt_hsh;
446
447 /* Stuff needed to resolve the label ambiguity
448 As:
449 ...
450 label: <insn>
451 may differ from:
452 ...
453 label:
454 <insn> */
455
456 static symbolS *last_label_seen;
457
458 /* Literal pool structure. Held on a per-section
459 and per-sub-section basis. */
460
461 #define MAX_LITERAL_POOL_SIZE 1024
462 typedef struct literal_expression
463 {
464 expressionS exp;
465 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
466 LITTLENUM_TYPE * bignum;
467 } literal_expression;
468
469 typedef struct literal_pool
470 {
471 literal_expression literals[MAX_LITERAL_POOL_SIZE];
472 unsigned int next_free_entry;
473 unsigned int id;
474 symbolS *symbol;
475 segT section;
476 subsegT sub_section;
477 int size;
478 struct literal_pool *next;
479 } literal_pool;
480
481 /* Pointer to a linked list of literal pools. */
482 static literal_pool *list_of_pools = NULL;
483 \f
484 /* Pure syntax. */
485
486 /* This array holds the chars that always start a comment. If the
487 pre-processor is disabled, these aren't very useful. */
488 const char comment_chars[] = "";
489
490 /* This array holds the chars that only start a comment at the beginning of
491 a line. If the line seems to have the form '# 123 filename'
492 .line and .file directives will appear in the pre-processed output. */
493 /* Note that input_file.c hand checks for '#' at the beginning of the
494 first line of the input file. This is because the compiler outputs
495 #NO_APP at the beginning of its output. */
496 /* Also note that comments like this one will always work. */
497 const char line_comment_chars[] = "#";
498
499 const char line_separator_chars[] = ";";
500
501 /* Chars that can be used to separate mant
502 from exp in floating point numbers. */
503 const char EXP_CHARS[] = "eE";
504
505 /* Chars that mean this number is a floating point constant. */
506 /* As in 0f12.456 */
507 /* or 0d1.2345e12 */
508
509 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
510
511 /* Prefix character that indicates the start of an immediate value. */
512 #define is_immediate_prefix(C) ((C) == '#')
513
514 /* Separator character handling. */
515
516 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
517
518 static inline bfd_boolean
519 skip_past_char (char **str, char c)
520 {
521 if (**str == c)
522 {
523 (*str)++;
524 return TRUE;
525 }
526 else
527 return FALSE;
528 }
529
530 #define skip_past_comma(str) skip_past_char (str, ',')
531
532 /* Arithmetic expressions (possibly involving symbols). */
533
534 static bfd_boolean in_my_get_expression_p = FALSE;
535
536 /* Third argument to my_get_expression. */
537 #define GE_NO_PREFIX 0
538 #define GE_OPT_PREFIX 1
539
540 /* Return TRUE if the string pointed by *STR is successfully parsed
541 as an valid expression; *EP will be filled with the information of
542 such an expression. Otherwise return FALSE. */
543
544 static bfd_boolean
545 my_get_expression (expressionS * ep, char **str, int prefix_mode,
546 int reject_absent)
547 {
548 char *save_in;
549 segT seg;
550 int prefix_present_p = 0;
551
552 switch (prefix_mode)
553 {
554 case GE_NO_PREFIX:
555 break;
556 case GE_OPT_PREFIX:
557 if (is_immediate_prefix (**str))
558 {
559 (*str)++;
560 prefix_present_p = 1;
561 }
562 break;
563 default:
564 abort ();
565 }
566
567 memset (ep, 0, sizeof (expressionS));
568
569 save_in = input_line_pointer;
570 input_line_pointer = *str;
571 in_my_get_expression_p = TRUE;
572 seg = expression (ep);
573 in_my_get_expression_p = FALSE;
574
575 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
576 {
577 /* We found a bad expression in md_operand(). */
578 *str = input_line_pointer;
579 input_line_pointer = save_in;
580 if (prefix_present_p && ! error_p ())
581 set_fatal_syntax_error (_("bad expression"));
582 else
583 set_first_syntax_error (_("bad expression"));
584 return FALSE;
585 }
586
587 #ifdef OBJ_AOUT
588 if (seg != absolute_section
589 && seg != text_section
590 && seg != data_section
591 && seg != bss_section && seg != undefined_section)
592 {
593 set_syntax_error (_("bad segment"));
594 *str = input_line_pointer;
595 input_line_pointer = save_in;
596 return FALSE;
597 }
598 #else
599 (void) seg;
600 #endif
601
602 *str = input_line_pointer;
603 input_line_pointer = save_in;
604 return TRUE;
605 }
606
607 /* Turn a string in input_line_pointer into a floating point constant
608 of type TYPE, and store the appropriate bytes in *LITP. The number
609 of LITTLENUMS emitted is stored in *SIZEP. An error message is
610 returned, or NULL on OK. */
611
612 const char *
613 md_atof (int type, char *litP, int *sizeP)
614 {
615 return ieee_md_atof (type, litP, sizeP, target_big_endian);
616 }
617
618 /* We handle all bad expressions here, so that we can report the faulty
619 instruction in the error message. */
620 void
621 md_operand (expressionS * exp)
622 {
623 if (in_my_get_expression_p)
624 exp->X_op = O_illegal;
625 }
626
627 /* Immediate values. */
628
629 /* Errors may be set multiple times during parsing or bit encoding
630 (particularly in the Neon bits), but usually the earliest error which is set
631 will be the most meaningful. Avoid overwriting it with later (cascading)
632 errors by calling this function. */
633
634 static void
635 first_error (const char *error)
636 {
637 if (! error_p ())
638 set_syntax_error (error);
639 }
640
641 /* Similiar to first_error, but this function accepts formatted error
642 message. */
643 static void
644 first_error_fmt (const char *format, ...)
645 {
646 va_list args;
647 enum
648 { size = 100 };
649 /* N.B. this single buffer will not cause error messages for different
650 instructions to pollute each other; this is because at the end of
651 processing of each assembly line, error message if any will be
652 collected by as_bad. */
653 static char buffer[size];
654
655 if (! error_p ())
656 {
657 int ret ATTRIBUTE_UNUSED;
658 va_start (args, format);
659 ret = vsnprintf (buffer, size, format, args);
660 know (ret <= size - 1 && ret >= 0);
661 va_end (args);
662 set_syntax_error (buffer);
663 }
664 }
665
666 /* Register parsing. */
667
668 /* Generic register parser which is called by other specialized
669 register parsers.
670 CCP points to what should be the beginning of a register name.
671 If it is indeed a valid register name, advance CCP over it and
672 return the reg_entry structure; otherwise return NULL.
673 It does not issue diagnostics. */
674
675 static reg_entry *
676 parse_reg (char **ccp)
677 {
678 char *start = *ccp;
679 char *p;
680 reg_entry *reg;
681
682 #ifdef REGISTER_PREFIX
683 if (*start != REGISTER_PREFIX)
684 return NULL;
685 start++;
686 #endif
687
688 p = start;
689 if (!ISALPHA (*p) || !is_name_beginner (*p))
690 return NULL;
691
692 do
693 p++;
694 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
695
696 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
697
698 if (!reg)
699 return NULL;
700
701 *ccp = p;
702 return reg;
703 }
704
705 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
706 return FALSE. */
707 static bfd_boolean
708 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
709 {
710 return (reg_type_masks[type] & (1 << reg->type)) != 0;
711 }
712
713 /* Try to parse a base or offset register. Allow SVE base and offset
714 registers if REG_TYPE includes SVE registers. Return the register
715 entry on success, setting *QUALIFIER to the register qualifier.
716 Return null otherwise.
717
718 Note that this function does not issue any diagnostics. */
719
720 static const reg_entry *
721 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
722 aarch64_opnd_qualifier_t *qualifier)
723 {
724 char *str = *ccp;
725 const reg_entry *reg = parse_reg (&str);
726
727 if (reg == NULL)
728 return NULL;
729
730 switch (reg->type)
731 {
732 case REG_TYPE_R_32:
733 case REG_TYPE_SP_32:
734 case REG_TYPE_Z_32:
735 *qualifier = AARCH64_OPND_QLF_W;
736 break;
737
738 case REG_TYPE_R_64:
739 case REG_TYPE_SP_64:
740 case REG_TYPE_Z_64:
741 *qualifier = AARCH64_OPND_QLF_X;
742 break;
743
744 case REG_TYPE_ZN:
745 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
746 || str[0] != '.')
747 return NULL;
748 switch (TOLOWER (str[1]))
749 {
750 case 's':
751 *qualifier = AARCH64_OPND_QLF_S_S;
752 break;
753 case 'd':
754 *qualifier = AARCH64_OPND_QLF_S_D;
755 break;
756 default:
757 return NULL;
758 }
759 str += 2;
760 break;
761
762 default:
763 return NULL;
764 }
765
766 *ccp = str;
767
768 return reg;
769 }
770
771 /* Try to parse a base or offset register. Return the register entry
772 on success, setting *QUALIFIER to the register qualifier. Return null
773 otherwise.
774
775 Note that this function does not issue any diagnostics. */
776
777 static const reg_entry *
778 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
779 {
780 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
781 }
782
783 /* Parse the qualifier of a vector register or vector element of type
784 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
785 succeeds; otherwise return FALSE.
786
787 Accept only one occurrence of:
788 8b 16b 2h 4h 8h 2s 4s 1d 2d
789 b h s d q */
790 static bfd_boolean
791 parse_vector_type_for_operand (aarch64_reg_type reg_type,
792 struct vector_type_el *parsed_type, char **str)
793 {
794 char *ptr = *str;
795 unsigned width;
796 unsigned element_size;
797 enum vector_el_type type;
798
799 /* skip '.' */
800 gas_assert (*ptr == '.');
801 ptr++;
802
803 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
804 {
805 width = 0;
806 goto elt_size;
807 }
808 width = strtoul (ptr, &ptr, 10);
809 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
810 {
811 first_error_fmt (_("bad size %d in vector width specifier"), width);
812 return FALSE;
813 }
814
815 elt_size:
816 switch (TOLOWER (*ptr))
817 {
818 case 'b':
819 type = NT_b;
820 element_size = 8;
821 break;
822 case 'h':
823 type = NT_h;
824 element_size = 16;
825 break;
826 case 's':
827 type = NT_s;
828 element_size = 32;
829 break;
830 case 'd':
831 type = NT_d;
832 element_size = 64;
833 break;
834 case 'q':
835 if (width == 1)
836 {
837 type = NT_q;
838 element_size = 128;
839 break;
840 }
841 /* fall through. */
842 default:
843 if (*ptr != '\0')
844 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
845 else
846 first_error (_("missing element size"));
847 return FALSE;
848 }
849 if (width != 0 && width * element_size != 64 && width * element_size != 128
850 && !(width == 2 && element_size == 16))
851 {
852 first_error_fmt (_
853 ("invalid element size %d and vector size combination %c"),
854 width, *ptr);
855 return FALSE;
856 }
857 ptr++;
858
859 parsed_type->type = type;
860 parsed_type->width = width;
861
862 *str = ptr;
863
864 return TRUE;
865 }
866
867 /* *STR contains an SVE zero/merge predication suffix. Parse it into
868 *PARSED_TYPE and point *STR at the end of the suffix. */
869
870 static bfd_boolean
871 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
872 {
873 char *ptr = *str;
874
875 /* Skip '/'. */
876 gas_assert (*ptr == '/');
877 ptr++;
878 switch (TOLOWER (*ptr))
879 {
880 case 'z':
881 parsed_type->type = NT_zero;
882 break;
883 case 'm':
884 parsed_type->type = NT_merge;
885 break;
886 default:
887 if (*ptr != '\0' && *ptr != ',')
888 first_error_fmt (_("unexpected character `%c' in predication type"),
889 *ptr);
890 else
891 first_error (_("missing predication type"));
892 return FALSE;
893 }
894 parsed_type->width = 0;
895 *str = ptr + 1;
896 return TRUE;
897 }
898
899 /* Parse a register of the type TYPE.
900
901 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
902 name or the parsed register is not of TYPE.
903
904 Otherwise return the register number, and optionally fill in the actual
905 type of the register in *RTYPE when multiple alternatives were given, and
906 return the register shape and element index information in *TYPEINFO.
907
908 IN_REG_LIST should be set with TRUE if the caller is parsing a register
909 list. */
910
911 static int
912 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
913 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
914 {
915 char *str = *ccp;
916 const reg_entry *reg = parse_reg (&str);
917 struct vector_type_el atype;
918 struct vector_type_el parsetype;
919 bfd_boolean is_typed_vecreg = FALSE;
920
921 atype.defined = 0;
922 atype.type = NT_invtype;
923 atype.width = -1;
924 atype.index = 0;
925
926 if (reg == NULL)
927 {
928 if (typeinfo)
929 *typeinfo = atype;
930 set_default_error ();
931 return PARSE_FAIL;
932 }
933
934 if (! aarch64_check_reg_type (reg, type))
935 {
936 DEBUG_TRACE ("reg type check failed");
937 set_default_error ();
938 return PARSE_FAIL;
939 }
940 type = reg->type;
941
942 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
943 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
944 {
945 if (*str == '.')
946 {
947 if (!parse_vector_type_for_operand (type, &parsetype, &str))
948 return PARSE_FAIL;
949 }
950 else
951 {
952 if (!parse_predication_for_operand (&parsetype, &str))
953 return PARSE_FAIL;
954 }
955
956 /* Register if of the form Vn.[bhsdq]. */
957 is_typed_vecreg = TRUE;
958
959 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
960 {
961 /* The width is always variable; we don't allow an integer width
962 to be specified. */
963 gas_assert (parsetype.width == 0);
964 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
965 }
966 else if (parsetype.width == 0)
967 /* Expect index. In the new scheme we cannot have
968 Vn.[bhsdq] represent a scalar. Therefore any
969 Vn.[bhsdq] should have an index following it.
970 Except in reglists ofcourse. */
971 atype.defined |= NTA_HASINDEX;
972 else
973 atype.defined |= NTA_HASTYPE;
974
975 atype.type = parsetype.type;
976 atype.width = parsetype.width;
977 }
978
979 if (skip_past_char (&str, '['))
980 {
981 expressionS exp;
982
983 /* Reject Sn[index] syntax. */
984 if (!is_typed_vecreg)
985 {
986 first_error (_("this type of register can't be indexed"));
987 return PARSE_FAIL;
988 }
989
990 if (in_reg_list == TRUE)
991 {
992 first_error (_("index not allowed inside register list"));
993 return PARSE_FAIL;
994 }
995
996 atype.defined |= NTA_HASINDEX;
997
998 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
999
1000 if (exp.X_op != O_constant)
1001 {
1002 first_error (_("constant expression required"));
1003 return PARSE_FAIL;
1004 }
1005
1006 if (! skip_past_char (&str, ']'))
1007 return PARSE_FAIL;
1008
1009 atype.index = exp.X_add_number;
1010 }
1011 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1012 {
1013 /* Indexed vector register expected. */
1014 first_error (_("indexed vector register expected"));
1015 return PARSE_FAIL;
1016 }
1017
1018 /* A vector reg Vn should be typed or indexed. */
1019 if (type == REG_TYPE_VN && atype.defined == 0)
1020 {
1021 first_error (_("invalid use of vector register"));
1022 }
1023
1024 if (typeinfo)
1025 *typeinfo = atype;
1026
1027 if (rtype)
1028 *rtype = type;
1029
1030 *ccp = str;
1031
1032 return reg->number;
1033 }
1034
1035 /* Parse register.
1036
1037 Return the register number on success; return PARSE_FAIL otherwise.
1038
1039 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1040 the register (e.g. NEON double or quad reg when either has been requested).
1041
1042 If this is a NEON vector register with additional type information, fill
1043 in the struct pointed to by VECTYPE (if non-NULL).
1044
1045 This parser does not handle register list. */
1046
1047 static int
1048 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1049 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1050 {
1051 struct vector_type_el atype;
1052 char *str = *ccp;
1053 int reg = parse_typed_reg (&str, type, rtype, &atype,
1054 /*in_reg_list= */ FALSE);
1055
1056 if (reg == PARSE_FAIL)
1057 return PARSE_FAIL;
1058
1059 if (vectype)
1060 *vectype = atype;
1061
1062 *ccp = str;
1063
1064 return reg;
1065 }
1066
1067 static inline bfd_boolean
1068 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1069 {
1070 return
1071 e1.type == e2.type
1072 && e1.defined == e2.defined
1073 && e1.width == e2.width && e1.index == e2.index;
1074 }
1075
1076 /* This function parses a list of vector registers of type TYPE.
1077 On success, it returns the parsed register list information in the
1078 following encoded format:
1079
1080 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1081 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1082
1083 The information of the register shape and/or index is returned in
1084 *VECTYPE.
1085
1086 It returns PARSE_FAIL if the register list is invalid.
1087
1088 The list contains one to four registers.
1089 Each register can be one of:
1090 <Vt>.<T>[<index>]
1091 <Vt>.<T>
1092 All <T> should be identical.
1093 All <index> should be identical.
1094 There are restrictions on <Vt> numbers which are checked later
1095 (by reg_list_valid_p). */
1096
1097 static int
1098 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1099 struct vector_type_el *vectype)
1100 {
1101 char *str = *ccp;
1102 int nb_regs;
1103 struct vector_type_el typeinfo, typeinfo_first;
1104 int val, val_range;
1105 int in_range;
1106 int ret_val;
1107 int i;
1108 bfd_boolean error = FALSE;
1109 bfd_boolean expect_index = FALSE;
1110
1111 if (*str != '{')
1112 {
1113 set_syntax_error (_("expecting {"));
1114 return PARSE_FAIL;
1115 }
1116 str++;
1117
1118 nb_regs = 0;
1119 typeinfo_first.defined = 0;
1120 typeinfo_first.type = NT_invtype;
1121 typeinfo_first.width = -1;
1122 typeinfo_first.index = 0;
1123 ret_val = 0;
1124 val = -1;
1125 val_range = -1;
1126 in_range = 0;
1127 do
1128 {
1129 if (in_range)
1130 {
1131 str++; /* skip over '-' */
1132 val_range = val;
1133 }
1134 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1135 /*in_reg_list= */ TRUE);
1136 if (val == PARSE_FAIL)
1137 {
1138 set_first_syntax_error (_("invalid vector register in list"));
1139 error = TRUE;
1140 continue;
1141 }
1142 /* reject [bhsd]n */
1143 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1144 {
1145 set_first_syntax_error (_("invalid scalar register in list"));
1146 error = TRUE;
1147 continue;
1148 }
1149
1150 if (typeinfo.defined & NTA_HASINDEX)
1151 expect_index = TRUE;
1152
1153 if (in_range)
1154 {
1155 if (val < val_range)
1156 {
1157 set_first_syntax_error
1158 (_("invalid range in vector register list"));
1159 error = TRUE;
1160 }
1161 val_range++;
1162 }
1163 else
1164 {
1165 val_range = val;
1166 if (nb_regs == 0)
1167 typeinfo_first = typeinfo;
1168 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1169 {
1170 set_first_syntax_error
1171 (_("type mismatch in vector register list"));
1172 error = TRUE;
1173 }
1174 }
1175 if (! error)
1176 for (i = val_range; i <= val; i++)
1177 {
1178 ret_val |= i << (5 * nb_regs);
1179 nb_regs++;
1180 }
1181 in_range = 0;
1182 }
1183 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1184
1185 skip_whitespace (str);
1186 if (*str != '}')
1187 {
1188 set_first_syntax_error (_("end of vector register list not found"));
1189 error = TRUE;
1190 }
1191 str++;
1192
1193 skip_whitespace (str);
1194
1195 if (expect_index)
1196 {
1197 if (skip_past_char (&str, '['))
1198 {
1199 expressionS exp;
1200
1201 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1202 if (exp.X_op != O_constant)
1203 {
1204 set_first_syntax_error (_("constant expression required."));
1205 error = TRUE;
1206 }
1207 if (! skip_past_char (&str, ']'))
1208 error = TRUE;
1209 else
1210 typeinfo_first.index = exp.X_add_number;
1211 }
1212 else
1213 {
1214 set_first_syntax_error (_("expected index"));
1215 error = TRUE;
1216 }
1217 }
1218
1219 if (nb_regs > 4)
1220 {
1221 set_first_syntax_error (_("too many registers in vector register list"));
1222 error = TRUE;
1223 }
1224 else if (nb_regs == 0)
1225 {
1226 set_first_syntax_error (_("empty vector register list"));
1227 error = TRUE;
1228 }
1229
1230 *ccp = str;
1231 if (! error)
1232 *vectype = typeinfo_first;
1233
1234 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1235 }
1236
1237 /* Directives: register aliases. */
1238
1239 static reg_entry *
1240 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1241 {
1242 reg_entry *new;
1243 const char *name;
1244
1245 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1246 {
1247 if (new->builtin)
1248 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1249 str);
1250
1251 /* Only warn about a redefinition if it's not defined as the
1252 same register. */
1253 else if (new->number != number || new->type != type)
1254 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1255
1256 return NULL;
1257 }
1258
1259 name = xstrdup (str);
1260 new = XNEW (reg_entry);
1261
1262 new->name = name;
1263 new->number = number;
1264 new->type = type;
1265 new->builtin = FALSE;
1266
1267 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1268 abort ();
1269
1270 return new;
1271 }
1272
1273 /* Look for the .req directive. This is of the form:
1274
1275 new_register_name .req existing_register_name
1276
1277 If we find one, or if it looks sufficiently like one that we want to
1278 handle any error here, return TRUE. Otherwise return FALSE. */
1279
1280 static bfd_boolean
1281 create_register_alias (char *newname, char *p)
1282 {
1283 const reg_entry *old;
1284 char *oldname, *nbuf;
1285 size_t nlen;
1286
1287 /* The input scrubber ensures that whitespace after the mnemonic is
1288 collapsed to single spaces. */
1289 oldname = p;
1290 if (strncmp (oldname, " .req ", 6) != 0)
1291 return FALSE;
1292
1293 oldname += 6;
1294 if (*oldname == '\0')
1295 return FALSE;
1296
1297 old = hash_find (aarch64_reg_hsh, oldname);
1298 if (!old)
1299 {
1300 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1301 return TRUE;
1302 }
1303
1304 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1305 the desired alias name, and p points to its end. If not, then
1306 the desired alias name is in the global original_case_string. */
1307 #ifdef TC_CASE_SENSITIVE
1308 nlen = p - newname;
1309 #else
1310 newname = original_case_string;
1311 nlen = strlen (newname);
1312 #endif
1313
1314 nbuf = xmemdup0 (newname, nlen);
1315
1316 /* Create aliases under the new name as stated; an all-lowercase
1317 version of the new name; and an all-uppercase version of the new
1318 name. */
1319 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1320 {
1321 for (p = nbuf; *p; p++)
1322 *p = TOUPPER (*p);
1323
1324 if (strncmp (nbuf, newname, nlen))
1325 {
1326 /* If this attempt to create an additional alias fails, do not bother
1327 trying to create the all-lower case alias. We will fail and issue
1328 a second, duplicate error message. This situation arises when the
1329 programmer does something like:
1330 foo .req r0
1331 Foo .req r1
1332 The second .req creates the "Foo" alias but then fails to create
1333 the artificial FOO alias because it has already been created by the
1334 first .req. */
1335 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1336 {
1337 free (nbuf);
1338 return TRUE;
1339 }
1340 }
1341
1342 for (p = nbuf; *p; p++)
1343 *p = TOLOWER (*p);
1344
1345 if (strncmp (nbuf, newname, nlen))
1346 insert_reg_alias (nbuf, old->number, old->type);
1347 }
1348
1349 free (nbuf);
1350 return TRUE;
1351 }
1352
1353 /* Should never be called, as .req goes between the alias and the
1354 register name, not at the beginning of the line. */
1355 static void
1356 s_req (int a ATTRIBUTE_UNUSED)
1357 {
1358 as_bad (_("invalid syntax for .req directive"));
1359 }
1360
1361 /* The .unreq directive deletes an alias which was previously defined
1362 by .req. For example:
1363
1364 my_alias .req r11
1365 .unreq my_alias */
1366
1367 static void
1368 s_unreq (int a ATTRIBUTE_UNUSED)
1369 {
1370 char *name;
1371 char saved_char;
1372
1373 name = input_line_pointer;
1374
1375 while (*input_line_pointer != 0
1376 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1377 ++input_line_pointer;
1378
1379 saved_char = *input_line_pointer;
1380 *input_line_pointer = 0;
1381
1382 if (!*name)
1383 as_bad (_("invalid syntax for .unreq directive"));
1384 else
1385 {
1386 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1387
1388 if (!reg)
1389 as_bad (_("unknown register alias '%s'"), name);
1390 else if (reg->builtin)
1391 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1392 name);
1393 else
1394 {
1395 char *p;
1396 char *nbuf;
1397
1398 hash_delete (aarch64_reg_hsh, name, FALSE);
1399 free ((char *) reg->name);
1400 free (reg);
1401
1402 /* Also locate the all upper case and all lower case versions.
1403 Do not complain if we cannot find one or the other as it
1404 was probably deleted above. */
1405
1406 nbuf = strdup (name);
1407 for (p = nbuf; *p; p++)
1408 *p = TOUPPER (*p);
1409 reg = hash_find (aarch64_reg_hsh, nbuf);
1410 if (reg)
1411 {
1412 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1413 free ((char *) reg->name);
1414 free (reg);
1415 }
1416
1417 for (p = nbuf; *p; p++)
1418 *p = TOLOWER (*p);
1419 reg = hash_find (aarch64_reg_hsh, nbuf);
1420 if (reg)
1421 {
1422 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1423 free ((char *) reg->name);
1424 free (reg);
1425 }
1426
1427 free (nbuf);
1428 }
1429 }
1430
1431 *input_line_pointer = saved_char;
1432 demand_empty_rest_of_line ();
1433 }
1434
1435 /* Directives: Instruction set selection. */
1436
1437 #ifdef OBJ_ELF
1438 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1439 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1440 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1441 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1442
1443 /* Create a new mapping symbol for the transition to STATE. */
1444
1445 static void
1446 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1447 {
1448 symbolS *symbolP;
1449 const char *symname;
1450 int type;
1451
1452 switch (state)
1453 {
1454 case MAP_DATA:
1455 symname = "$d";
1456 type = BSF_NO_FLAGS;
1457 break;
1458 case MAP_INSN:
1459 symname = "$x";
1460 type = BSF_NO_FLAGS;
1461 break;
1462 default:
1463 abort ();
1464 }
1465
1466 symbolP = symbol_new (symname, now_seg, value, frag);
1467 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1468
1469 /* Save the mapping symbols for future reference. Also check that
1470 we do not place two mapping symbols at the same offset within a
1471 frag. We'll handle overlap between frags in
1472 check_mapping_symbols.
1473
1474 If .fill or other data filling directive generates zero sized data,
1475 the mapping symbol for the following code will have the same value
1476 as the one generated for the data filling directive. In this case,
1477 we replace the old symbol with the new one at the same address. */
1478 if (value == 0)
1479 {
1480 if (frag->tc_frag_data.first_map != NULL)
1481 {
1482 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1483 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1484 &symbol_lastP);
1485 }
1486 frag->tc_frag_data.first_map = symbolP;
1487 }
1488 if (frag->tc_frag_data.last_map != NULL)
1489 {
1490 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1491 S_GET_VALUE (symbolP));
1492 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1493 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1494 &symbol_lastP);
1495 }
1496 frag->tc_frag_data.last_map = symbolP;
1497 }
1498
1499 /* We must sometimes convert a region marked as code to data during
1500 code alignment, if an odd number of bytes have to be padded. The
1501 code mapping symbol is pushed to an aligned address. */
1502
1503 static void
1504 insert_data_mapping_symbol (enum mstate state,
1505 valueT value, fragS * frag, offsetT bytes)
1506 {
1507 /* If there was already a mapping symbol, remove it. */
1508 if (frag->tc_frag_data.last_map != NULL
1509 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1510 frag->fr_address + value)
1511 {
1512 symbolS *symp = frag->tc_frag_data.last_map;
1513
1514 if (value == 0)
1515 {
1516 know (frag->tc_frag_data.first_map == symp);
1517 frag->tc_frag_data.first_map = NULL;
1518 }
1519 frag->tc_frag_data.last_map = NULL;
1520 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1521 }
1522
1523 make_mapping_symbol (MAP_DATA, value, frag);
1524 make_mapping_symbol (state, value + bytes, frag);
1525 }
1526
1527 static void mapping_state_2 (enum mstate state, int max_chars);
1528
1529 /* Set the mapping state to STATE. Only call this when about to
1530 emit some STATE bytes to the file. */
1531
1532 void
1533 mapping_state (enum mstate state)
1534 {
1535 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1536
1537 if (state == MAP_INSN)
1538 /* AArch64 instructions require 4-byte alignment. When emitting
1539 instructions into any section, record the appropriate section
1540 alignment. */
1541 record_alignment (now_seg, 2);
1542
1543 if (mapstate == state)
1544 /* The mapping symbol has already been emitted.
1545 There is nothing else to do. */
1546 return;
1547
1548 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1549 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1550 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1551 evaluated later in the next else. */
1552 return;
1553 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1554 {
1555 /* Only add the symbol if the offset is > 0:
1556 if we're at the first frag, check it's size > 0;
1557 if we're not at the first frag, then for sure
1558 the offset is > 0. */
1559 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1560 const int add_symbol = (frag_now != frag_first)
1561 || (frag_now_fix () > 0);
1562
1563 if (add_symbol)
1564 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1565 }
1566 #undef TRANSITION
1567
1568 mapping_state_2 (state, 0);
1569 }
1570
1571 /* Same as mapping_state, but MAX_CHARS bytes have already been
1572 allocated. Put the mapping symbol that far back. */
1573
1574 static void
1575 mapping_state_2 (enum mstate state, int max_chars)
1576 {
1577 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1578
1579 if (!SEG_NORMAL (now_seg))
1580 return;
1581
1582 if (mapstate == state)
1583 /* The mapping symbol has already been emitted.
1584 There is nothing else to do. */
1585 return;
1586
1587 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1588 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1589 }
1590 #else
1591 #define mapping_state(x) /* nothing */
1592 #define mapping_state_2(x, y) /* nothing */
1593 #endif
1594
1595 /* Directives: sectioning and alignment. */
1596
1597 static void
1598 s_bss (int ignore ATTRIBUTE_UNUSED)
1599 {
1600 /* We don't support putting frags in the BSS segment, we fake it by
1601 marking in_bss, then looking at s_skip for clues. */
1602 subseg_set (bss_section, 0);
1603 demand_empty_rest_of_line ();
1604 mapping_state (MAP_DATA);
1605 }
1606
1607 static void
1608 s_even (int ignore ATTRIBUTE_UNUSED)
1609 {
1610 /* Never make frag if expect extra pass. */
1611 if (!need_pass_2)
1612 frag_align (1, 0, 0);
1613
1614 record_alignment (now_seg, 1);
1615
1616 demand_empty_rest_of_line ();
1617 }
1618
1619 /* Directives: Literal pools. */
1620
1621 static literal_pool *
1622 find_literal_pool (int size)
1623 {
1624 literal_pool *pool;
1625
1626 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1627 {
1628 if (pool->section == now_seg
1629 && pool->sub_section == now_subseg && pool->size == size)
1630 break;
1631 }
1632
1633 return pool;
1634 }
1635
1636 static literal_pool *
1637 find_or_make_literal_pool (int size)
1638 {
1639 /* Next literal pool ID number. */
1640 static unsigned int latest_pool_num = 1;
1641 literal_pool *pool;
1642
1643 pool = find_literal_pool (size);
1644
1645 if (pool == NULL)
1646 {
1647 /* Create a new pool. */
1648 pool = XNEW (literal_pool);
1649 if (!pool)
1650 return NULL;
1651
1652 /* Currently we always put the literal pool in the current text
1653 section. If we were generating "small" model code where we
1654 knew that all code and initialised data was within 1MB then
1655 we could output literals to mergeable, read-only data
1656 sections. */
1657
1658 pool->next_free_entry = 0;
1659 pool->section = now_seg;
1660 pool->sub_section = now_subseg;
1661 pool->size = size;
1662 pool->next = list_of_pools;
1663 pool->symbol = NULL;
1664
1665 /* Add it to the list. */
1666 list_of_pools = pool;
1667 }
1668
1669 /* New pools, and emptied pools, will have a NULL symbol. */
1670 if (pool->symbol == NULL)
1671 {
1672 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1673 (valueT) 0, &zero_address_frag);
1674 pool->id = latest_pool_num++;
1675 }
1676
1677 /* Done. */
1678 return pool;
1679 }
1680
1681 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1682 Return TRUE on success, otherwise return FALSE. */
1683 static bfd_boolean
1684 add_to_lit_pool (expressionS *exp, int size)
1685 {
1686 literal_pool *pool;
1687 unsigned int entry;
1688
1689 pool = find_or_make_literal_pool (size);
1690
1691 /* Check if this literal value is already in the pool. */
1692 for (entry = 0; entry < pool->next_free_entry; entry++)
1693 {
1694 expressionS * litexp = & pool->literals[entry].exp;
1695
1696 if ((litexp->X_op == exp->X_op)
1697 && (exp->X_op == O_constant)
1698 && (litexp->X_add_number == exp->X_add_number)
1699 && (litexp->X_unsigned == exp->X_unsigned))
1700 break;
1701
1702 if ((litexp->X_op == exp->X_op)
1703 && (exp->X_op == O_symbol)
1704 && (litexp->X_add_number == exp->X_add_number)
1705 && (litexp->X_add_symbol == exp->X_add_symbol)
1706 && (litexp->X_op_symbol == exp->X_op_symbol))
1707 break;
1708 }
1709
1710 /* Do we need to create a new entry? */
1711 if (entry == pool->next_free_entry)
1712 {
1713 if (entry >= MAX_LITERAL_POOL_SIZE)
1714 {
1715 set_syntax_error (_("literal pool overflow"));
1716 return FALSE;
1717 }
1718
1719 pool->literals[entry].exp = *exp;
1720 pool->next_free_entry += 1;
1721 if (exp->X_op == O_big)
1722 {
1723 /* PR 16688: Bignums are held in a single global array. We must
1724 copy and preserve that value now, before it is overwritten. */
1725 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1726 exp->X_add_number);
1727 memcpy (pool->literals[entry].bignum, generic_bignum,
1728 CHARS_PER_LITTLENUM * exp->X_add_number);
1729 }
1730 else
1731 pool->literals[entry].bignum = NULL;
1732 }
1733
1734 exp->X_op = O_symbol;
1735 exp->X_add_number = ((int) entry) * size;
1736 exp->X_add_symbol = pool->symbol;
1737
1738 return TRUE;
1739 }
1740
1741 /* Can't use symbol_new here, so have to create a symbol and then at
1742 a later date assign it a value. Thats what these functions do. */
1743
1744 static void
1745 symbol_locate (symbolS * symbolP,
1746 const char *name,/* It is copied, the caller can modify. */
1747 segT segment, /* Segment identifier (SEG_<something>). */
1748 valueT valu, /* Symbol value. */
1749 fragS * frag) /* Associated fragment. */
1750 {
1751 size_t name_length;
1752 char *preserved_copy_of_name;
1753
1754 name_length = strlen (name) + 1; /* +1 for \0. */
1755 obstack_grow (&notes, name, name_length);
1756 preserved_copy_of_name = obstack_finish (&notes);
1757
1758 #ifdef tc_canonicalize_symbol_name
1759 preserved_copy_of_name =
1760 tc_canonicalize_symbol_name (preserved_copy_of_name);
1761 #endif
1762
1763 S_SET_NAME (symbolP, preserved_copy_of_name);
1764
1765 S_SET_SEGMENT (symbolP, segment);
1766 S_SET_VALUE (symbolP, valu);
1767 symbol_clear_list_pointers (symbolP);
1768
1769 symbol_set_frag (symbolP, frag);
1770
1771 /* Link to end of symbol chain. */
1772 {
1773 extern int symbol_table_frozen;
1774
1775 if (symbol_table_frozen)
1776 abort ();
1777 }
1778
1779 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1780
1781 obj_symbol_new_hook (symbolP);
1782
1783 #ifdef tc_symbol_new_hook
1784 tc_symbol_new_hook (symbolP);
1785 #endif
1786
1787 #ifdef DEBUG_SYMS
1788 verify_symbol_chain (symbol_rootP, symbol_lastP);
1789 #endif /* DEBUG_SYMS */
1790 }
1791
1792
1793 static void
1794 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1795 {
1796 unsigned int entry;
1797 literal_pool *pool;
1798 char sym_name[20];
1799 int align;
1800
1801 for (align = 2; align <= 4; align++)
1802 {
1803 int size = 1 << align;
1804
1805 pool = find_literal_pool (size);
1806 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1807 continue;
1808
1809 /* Align pool as you have word accesses.
1810 Only make a frag if we have to. */
1811 if (!need_pass_2)
1812 frag_align (align, 0, 0);
1813
1814 mapping_state (MAP_DATA);
1815
1816 record_alignment (now_seg, align);
1817
1818 sprintf (sym_name, "$$lit_\002%x", pool->id);
1819
1820 symbol_locate (pool->symbol, sym_name, now_seg,
1821 (valueT) frag_now_fix (), frag_now);
1822 symbol_table_insert (pool->symbol);
1823
1824 for (entry = 0; entry < pool->next_free_entry; entry++)
1825 {
1826 expressionS * exp = & pool->literals[entry].exp;
1827
1828 if (exp->X_op == O_big)
1829 {
1830 /* PR 16688: Restore the global bignum value. */
1831 gas_assert (pool->literals[entry].bignum != NULL);
1832 memcpy (generic_bignum, pool->literals[entry].bignum,
1833 CHARS_PER_LITTLENUM * exp->X_add_number);
1834 }
1835
1836 /* First output the expression in the instruction to the pool. */
1837 emit_expr (exp, size); /* .word|.xword */
1838
1839 if (exp->X_op == O_big)
1840 {
1841 free (pool->literals[entry].bignum);
1842 pool->literals[entry].bignum = NULL;
1843 }
1844 }
1845
1846 /* Mark the pool as empty. */
1847 pool->next_free_entry = 0;
1848 pool->symbol = NULL;
1849 }
1850 }
1851
1852 #ifdef OBJ_ELF
1853 /* Forward declarations for functions below, in the MD interface
1854 section. */
1855 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1856 static struct reloc_table_entry * find_reloc_table_entry (char **);
1857
1858 /* Directives: Data. */
1859 /* N.B. the support for relocation suffix in this directive needs to be
1860 implemented properly. */
1861
1862 static void
1863 s_aarch64_elf_cons (int nbytes)
1864 {
1865 expressionS exp;
1866
1867 #ifdef md_flush_pending_output
1868 md_flush_pending_output ();
1869 #endif
1870
1871 if (is_it_end_of_statement ())
1872 {
1873 demand_empty_rest_of_line ();
1874 return;
1875 }
1876
1877 #ifdef md_cons_align
1878 md_cons_align (nbytes);
1879 #endif
1880
1881 mapping_state (MAP_DATA);
1882 do
1883 {
1884 struct reloc_table_entry *reloc;
1885
1886 expression (&exp);
1887
1888 if (exp.X_op != O_symbol)
1889 emit_expr (&exp, (unsigned int) nbytes);
1890 else
1891 {
1892 skip_past_char (&input_line_pointer, '#');
1893 if (skip_past_char (&input_line_pointer, ':'))
1894 {
1895 reloc = find_reloc_table_entry (&input_line_pointer);
1896 if (reloc == NULL)
1897 as_bad (_("unrecognized relocation suffix"));
1898 else
1899 as_bad (_("unimplemented relocation suffix"));
1900 ignore_rest_of_line ();
1901 return;
1902 }
1903 else
1904 emit_expr (&exp, (unsigned int) nbytes);
1905 }
1906 }
1907 while (*input_line_pointer++ == ',');
1908
1909 /* Put terminator back into stream. */
1910 input_line_pointer--;
1911 demand_empty_rest_of_line ();
1912 }
1913
1914 #endif /* OBJ_ELF */
1915
1916 /* Output a 32-bit word, but mark as an instruction. */
1917
1918 static void
1919 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1920 {
1921 expressionS exp;
1922
1923 #ifdef md_flush_pending_output
1924 md_flush_pending_output ();
1925 #endif
1926
1927 if (is_it_end_of_statement ())
1928 {
1929 demand_empty_rest_of_line ();
1930 return;
1931 }
1932
1933 /* Sections are assumed to start aligned. In executable section, there is no
1934 MAP_DATA symbol pending. So we only align the address during
1935 MAP_DATA --> MAP_INSN transition.
1936 For other sections, this is not guaranteed. */
1937 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1938 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1939 frag_align_code (2, 0);
1940
1941 #ifdef OBJ_ELF
1942 mapping_state (MAP_INSN);
1943 #endif
1944
1945 do
1946 {
1947 expression (&exp);
1948 if (exp.X_op != O_constant)
1949 {
1950 as_bad (_("constant expression required"));
1951 ignore_rest_of_line ();
1952 return;
1953 }
1954
1955 if (target_big_endian)
1956 {
1957 unsigned int val = exp.X_add_number;
1958 exp.X_add_number = SWAP_32 (val);
1959 }
1960 emit_expr (&exp, 4);
1961 }
1962 while (*input_line_pointer++ == ',');
1963
1964 /* Put terminator back into stream. */
1965 input_line_pointer--;
1966 demand_empty_rest_of_line ();
1967 }
1968
1969 #ifdef OBJ_ELF
1970 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1971
1972 static void
1973 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1974 {
1975 expressionS exp;
1976
1977 expression (&exp);
1978 frag_grow (4);
1979 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1980 BFD_RELOC_AARCH64_TLSDESC_ADD);
1981
1982 demand_empty_rest_of_line ();
1983 }
1984
1985 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1986
1987 static void
1988 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1989 {
1990 expressionS exp;
1991
1992 /* Since we're just labelling the code, there's no need to define a
1993 mapping symbol. */
1994 expression (&exp);
1995 /* Make sure there is enough room in this frag for the following
1996 blr. This trick only works if the blr follows immediately after
1997 the .tlsdesc directive. */
1998 frag_grow (4);
1999 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2000 BFD_RELOC_AARCH64_TLSDESC_CALL);
2001
2002 demand_empty_rest_of_line ();
2003 }
2004
2005 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2006
2007 static void
2008 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2009 {
2010 expressionS exp;
2011
2012 expression (&exp);
2013 frag_grow (4);
2014 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2015 BFD_RELOC_AARCH64_TLSDESC_LDR);
2016
2017 demand_empty_rest_of_line ();
2018 }
2019 #endif /* OBJ_ELF */
2020
2021 static void s_aarch64_arch (int);
2022 static void s_aarch64_cpu (int);
2023 static void s_aarch64_arch_extension (int);
2024
2025 /* This table describes all the machine specific pseudo-ops the assembler
2026 has to support. The fields are:
2027 pseudo-op name without dot
2028 function to call to execute this pseudo-op
2029 Integer arg to pass to the function. */
2030
2031 const pseudo_typeS md_pseudo_table[] = {
2032 /* Never called because '.req' does not start a line. */
2033 {"req", s_req, 0},
2034 {"unreq", s_unreq, 0},
2035 {"bss", s_bss, 0},
2036 {"even", s_even, 0},
2037 {"ltorg", s_ltorg, 0},
2038 {"pool", s_ltorg, 0},
2039 {"cpu", s_aarch64_cpu, 0},
2040 {"arch", s_aarch64_arch, 0},
2041 {"arch_extension", s_aarch64_arch_extension, 0},
2042 {"inst", s_aarch64_inst, 0},
2043 #ifdef OBJ_ELF
2044 {"tlsdescadd", s_tlsdescadd, 0},
2045 {"tlsdesccall", s_tlsdesccall, 0},
2046 {"tlsdescldr", s_tlsdescldr, 0},
2047 {"word", s_aarch64_elf_cons, 4},
2048 {"long", s_aarch64_elf_cons, 4},
2049 {"xword", s_aarch64_elf_cons, 8},
2050 {"dword", s_aarch64_elf_cons, 8},
2051 #endif
2052 {0, 0, 0}
2053 };
2054 \f
2055
2056 /* Check whether STR points to a register name followed by a comma or the
2057 end of line; REG_TYPE indicates which register types are checked
2058 against. Return TRUE if STR is such a register name; otherwise return
2059 FALSE. The function does not intend to produce any diagnostics, but since
2060 the register parser aarch64_reg_parse, which is called by this function,
2061 does produce diagnostics, we call clear_error to clear any diagnostics
2062 that may be generated by aarch64_reg_parse.
2063 Also, the function returns FALSE directly if there is any user error
2064 present at the function entry. This prevents the existing diagnostics
2065 state from being spoiled.
2066 The function currently serves parse_constant_immediate and
2067 parse_big_immediate only. */
2068 static bfd_boolean
2069 reg_name_p (char *str, aarch64_reg_type reg_type)
2070 {
2071 int reg;
2072
2073 /* Prevent the diagnostics state from being spoiled. */
2074 if (error_p ())
2075 return FALSE;
2076
2077 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2078
2079 /* Clear the parsing error that may be set by the reg parser. */
2080 clear_error ();
2081
2082 if (reg == PARSE_FAIL)
2083 return FALSE;
2084
2085 skip_whitespace (str);
2086 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2087 return TRUE;
2088
2089 return FALSE;
2090 }
2091
2092 /* Parser functions used exclusively in instruction operands. */
2093
2094 /* Parse an immediate expression which may not be constant.
2095
2096 To prevent the expression parser from pushing a register name
2097 into the symbol table as an undefined symbol, firstly a check is
2098 done to find out whether STR is a register of type REG_TYPE followed
2099 by a comma or the end of line. Return FALSE if STR is such a string. */
2100
2101 static bfd_boolean
2102 parse_immediate_expression (char **str, expressionS *exp,
2103 aarch64_reg_type reg_type)
2104 {
2105 if (reg_name_p (*str, reg_type))
2106 {
2107 set_recoverable_error (_("immediate operand required"));
2108 return FALSE;
2109 }
2110
2111 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2112
2113 if (exp->X_op == O_absent)
2114 {
2115 set_fatal_syntax_error (_("missing immediate expression"));
2116 return FALSE;
2117 }
2118
2119 return TRUE;
2120 }
2121
2122 /* Constant immediate-value read function for use in insn parsing.
2123 STR points to the beginning of the immediate (with the optional
2124 leading #); *VAL receives the value. REG_TYPE says which register
2125 names should be treated as registers rather than as symbolic immediates.
2126
2127 Return TRUE on success; otherwise return FALSE. */
2128
2129 static bfd_boolean
2130 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2131 {
2132 expressionS exp;
2133
2134 if (! parse_immediate_expression (str, &exp, reg_type))
2135 return FALSE;
2136
2137 if (exp.X_op != O_constant)
2138 {
2139 set_syntax_error (_("constant expression required"));
2140 return FALSE;
2141 }
2142
2143 *val = exp.X_add_number;
2144 return TRUE;
2145 }
2146
2147 static uint32_t
2148 encode_imm_float_bits (uint32_t imm)
2149 {
2150 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2151 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2152 }
2153
2154 /* Return TRUE if the single-precision floating-point value encoded in IMM
2155 can be expressed in the AArch64 8-bit signed floating-point format with
2156 3-bit exponent and normalized 4 bits of precision; in other words, the
2157 floating-point value must be expressable as
2158 (+/-) n / 16 * power (2, r)
2159 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2160
2161 static bfd_boolean
2162 aarch64_imm_float_p (uint32_t imm)
2163 {
2164 /* If a single-precision floating-point value has the following bit
2165 pattern, it can be expressed in the AArch64 8-bit floating-point
2166 format:
2167
2168 3 32222222 2221111111111
2169 1 09876543 21098765432109876543210
2170 n Eeeeeexx xxxx0000000000000000000
2171
2172 where n, e and each x are either 0 or 1 independently, with
2173 E == ~ e. */
2174
2175 uint32_t pattern;
2176
2177 /* Prepare the pattern for 'Eeeeee'. */
2178 if (((imm >> 30) & 0x1) == 0)
2179 pattern = 0x3e000000;
2180 else
2181 pattern = 0x40000000;
2182
2183 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2184 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2185 }
2186
2187 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2188 as an IEEE float without any loss of precision. Store the value in
2189 *FPWORD if so. */
2190
2191 static bfd_boolean
2192 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2193 {
2194 /* If a double-precision floating-point value has the following bit
2195 pattern, it can be expressed in a float:
2196
2197 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2198 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2199 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2200
2201 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2202 if Eeee_eeee != 1111_1111
2203
2204 where n, e, s and S are either 0 or 1 independently and where ~ is the
2205 inverse of E. */
2206
2207 uint32_t pattern;
2208 uint32_t high32 = imm >> 32;
2209 uint32_t low32 = imm;
2210
2211 /* Lower 29 bits need to be 0s. */
2212 if ((imm & 0x1fffffff) != 0)
2213 return FALSE;
2214
2215 /* Prepare the pattern for 'Eeeeeeeee'. */
2216 if (((high32 >> 30) & 0x1) == 0)
2217 pattern = 0x38000000;
2218 else
2219 pattern = 0x40000000;
2220
2221 /* Check E~~~. */
2222 if ((high32 & 0x78000000) != pattern)
2223 return FALSE;
2224
2225 /* Check Eeee_eeee != 1111_1111. */
2226 if ((high32 & 0x7ff00000) == 0x47f00000)
2227 return FALSE;
2228
2229 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2230 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2231 | (low32 >> 29)); /* 3 S bits. */
2232 return TRUE;
2233 }
2234
2235 /* Parse a floating-point immediate. Return TRUE on success and return the
2236 value in *IMMED in the format of IEEE754 single-precision encoding.
2237 *CCP points to the start of the string; DP_P is TRUE when the immediate
2238 is expected to be in double-precision (N.B. this only matters when
2239 hexadecimal representation is involved). REG_TYPE says which register
2240 names should be treated as registers rather than as symbolic immediates.
2241
2242 This routine accepts any IEEE float; it is up to the callers to reject
2243 invalid ones. */
2244
2245 static bfd_boolean
2246 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2247 aarch64_reg_type reg_type)
2248 {
2249 char *str = *ccp;
2250 char *fpnum;
2251 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2252 int found_fpchar = 0;
2253 int64_t val = 0;
2254 unsigned fpword = 0;
2255 bfd_boolean hex_p = FALSE;
2256
2257 skip_past_char (&str, '#');
2258
2259 fpnum = str;
2260 skip_whitespace (fpnum);
2261
2262 if (strncmp (fpnum, "0x", 2) == 0)
2263 {
2264 /* Support the hexadecimal representation of the IEEE754 encoding.
2265 Double-precision is expected when DP_P is TRUE, otherwise the
2266 representation should be in single-precision. */
2267 if (! parse_constant_immediate (&str, &val, reg_type))
2268 goto invalid_fp;
2269
2270 if (dp_p)
2271 {
2272 if (!can_convert_double_to_float (val, &fpword))
2273 goto invalid_fp;
2274 }
2275 else if ((uint64_t) val > 0xffffffff)
2276 goto invalid_fp;
2277 else
2278 fpword = val;
2279
2280 hex_p = TRUE;
2281 }
2282 else
2283 {
2284 if (reg_name_p (str, reg_type))
2285 {
2286 set_recoverable_error (_("immediate operand required"));
2287 return FALSE;
2288 }
2289
2290 /* We must not accidentally parse an integer as a floating-point number.
2291 Make sure that the value we parse is not an integer by checking for
2292 special characters '.' or 'e'. */
2293 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2294 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2295 {
2296 found_fpchar = 1;
2297 break;
2298 }
2299
2300 if (!found_fpchar)
2301 return FALSE;
2302 }
2303
2304 if (! hex_p)
2305 {
2306 int i;
2307
2308 if ((str = atof_ieee (str, 's', words)) == NULL)
2309 goto invalid_fp;
2310
2311 /* Our FP word must be 32 bits (single-precision FP). */
2312 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2313 {
2314 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2315 fpword |= words[i];
2316 }
2317 }
2318
2319 *immed = fpword;
2320 *ccp = str;
2321 return TRUE;
2322
2323 invalid_fp:
2324 set_fatal_syntax_error (_("invalid floating-point constant"));
2325 return FALSE;
2326 }
2327
2328 /* Less-generic immediate-value read function with the possibility of loading
2329 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2330 instructions.
2331
2332 To prevent the expression parser from pushing a register name into the
2333 symbol table as an undefined symbol, a check is firstly done to find
2334 out whether STR is a register of type REG_TYPE followed by a comma or
2335 the end of line. Return FALSE if STR is such a register. */
2336
2337 static bfd_boolean
2338 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2339 {
2340 char *ptr = *str;
2341
2342 if (reg_name_p (ptr, reg_type))
2343 {
2344 set_syntax_error (_("immediate operand required"));
2345 return FALSE;
2346 }
2347
2348 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2349
2350 if (inst.reloc.exp.X_op == O_constant)
2351 *imm = inst.reloc.exp.X_add_number;
2352
2353 *str = ptr;
2354
2355 return TRUE;
2356 }
2357
2358 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2359 if NEED_LIBOPCODES is non-zero, the fixup will need
2360 assistance from the libopcodes. */
2361
2362 static inline void
2363 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2364 const aarch64_opnd_info *operand,
2365 int need_libopcodes_p)
2366 {
2367 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2368 reloc->opnd = operand->type;
2369 if (need_libopcodes_p)
2370 reloc->need_libopcodes_p = 1;
2371 };
2372
2373 /* Return TRUE if the instruction needs to be fixed up later internally by
2374 the GAS; otherwise return FALSE. */
2375
2376 static inline bfd_boolean
2377 aarch64_gas_internal_fixup_p (void)
2378 {
2379 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2380 }
2381
2382 /* Assign the immediate value to the relavant field in *OPERAND if
2383 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2384 needs an internal fixup in a later stage.
2385 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2386 IMM.VALUE that may get assigned with the constant. */
2387 static inline void
2388 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2389 aarch64_opnd_info *operand,
2390 int addr_off_p,
2391 int need_libopcodes_p,
2392 int skip_p)
2393 {
2394 if (reloc->exp.X_op == O_constant)
2395 {
2396 if (addr_off_p)
2397 operand->addr.offset.imm = reloc->exp.X_add_number;
2398 else
2399 operand->imm.value = reloc->exp.X_add_number;
2400 reloc->type = BFD_RELOC_UNUSED;
2401 }
2402 else
2403 {
2404 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2405 /* Tell libopcodes to ignore this operand or not. This is helpful
2406 when one of the operands needs to be fixed up later but we need
2407 libopcodes to check the other operands. */
2408 operand->skip = skip_p;
2409 }
2410 }
2411
2412 /* Relocation modifiers. Each entry in the table contains the textual
2413 name for the relocation which may be placed before a symbol used as
2414 a load/store offset, or add immediate. It must be surrounded by a
2415 leading and trailing colon, for example:
2416
2417 ldr x0, [x1, #:rello:varsym]
2418 add x0, x1, #:rello:varsym */
2419
2420 struct reloc_table_entry
2421 {
2422 const char *name;
2423 int pc_rel;
2424 bfd_reloc_code_real_type adr_type;
2425 bfd_reloc_code_real_type adrp_type;
2426 bfd_reloc_code_real_type movw_type;
2427 bfd_reloc_code_real_type add_type;
2428 bfd_reloc_code_real_type ldst_type;
2429 bfd_reloc_code_real_type ld_literal_type;
2430 };
2431
2432 static struct reloc_table_entry reloc_table[] = {
2433 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2434 {"lo12", 0,
2435 0, /* adr_type */
2436 0,
2437 0,
2438 BFD_RELOC_AARCH64_ADD_LO12,
2439 BFD_RELOC_AARCH64_LDST_LO12,
2440 0},
2441
2442 /* Higher 21 bits of pc-relative page offset: ADRP */
2443 {"pg_hi21", 1,
2444 0, /* adr_type */
2445 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2446 0,
2447 0,
2448 0,
2449 0},
2450
2451 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2452 {"pg_hi21_nc", 1,
2453 0, /* adr_type */
2454 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2455 0,
2456 0,
2457 0,
2458 0},
2459
2460 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2461 {"abs_g0", 0,
2462 0, /* adr_type */
2463 0,
2464 BFD_RELOC_AARCH64_MOVW_G0,
2465 0,
2466 0,
2467 0},
2468
2469 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2470 {"abs_g0_s", 0,
2471 0, /* adr_type */
2472 0,
2473 BFD_RELOC_AARCH64_MOVW_G0_S,
2474 0,
2475 0,
2476 0},
2477
2478 /* Less significant bits 0-15 of address/value: MOVK, no check */
2479 {"abs_g0_nc", 0,
2480 0, /* adr_type */
2481 0,
2482 BFD_RELOC_AARCH64_MOVW_G0_NC,
2483 0,
2484 0,
2485 0},
2486
2487 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2488 {"abs_g1", 0,
2489 0, /* adr_type */
2490 0,
2491 BFD_RELOC_AARCH64_MOVW_G1,
2492 0,
2493 0,
2494 0},
2495
2496 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2497 {"abs_g1_s", 0,
2498 0, /* adr_type */
2499 0,
2500 BFD_RELOC_AARCH64_MOVW_G1_S,
2501 0,
2502 0,
2503 0},
2504
2505 /* Less significant bits 16-31 of address/value: MOVK, no check */
2506 {"abs_g1_nc", 0,
2507 0, /* adr_type */
2508 0,
2509 BFD_RELOC_AARCH64_MOVW_G1_NC,
2510 0,
2511 0,
2512 0},
2513
2514 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2515 {"abs_g2", 0,
2516 0, /* adr_type */
2517 0,
2518 BFD_RELOC_AARCH64_MOVW_G2,
2519 0,
2520 0,
2521 0},
2522
2523 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2524 {"abs_g2_s", 0,
2525 0, /* adr_type */
2526 0,
2527 BFD_RELOC_AARCH64_MOVW_G2_S,
2528 0,
2529 0,
2530 0},
2531
2532 /* Less significant bits 32-47 of address/value: MOVK, no check */
2533 {"abs_g2_nc", 0,
2534 0, /* adr_type */
2535 0,
2536 BFD_RELOC_AARCH64_MOVW_G2_NC,
2537 0,
2538 0,
2539 0},
2540
2541 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2542 {"abs_g3", 0,
2543 0, /* adr_type */
2544 0,
2545 BFD_RELOC_AARCH64_MOVW_G3,
2546 0,
2547 0,
2548 0},
2549
2550 /* Get to the page containing GOT entry for a symbol. */
2551 {"got", 1,
2552 0, /* adr_type */
2553 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2554 0,
2555 0,
2556 0,
2557 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2558
2559 /* 12 bit offset into the page containing GOT entry for that symbol. */
2560 {"got_lo12", 0,
2561 0, /* adr_type */
2562 0,
2563 0,
2564 0,
2565 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2566 0},
2567
2568 /* 0-15 bits of address/value: MOVk, no check. */
2569 {"gotoff_g0_nc", 0,
2570 0, /* adr_type */
2571 0,
2572 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2573 0,
2574 0,
2575 0},
2576
2577 /* Most significant bits 16-31 of address/value: MOVZ. */
2578 {"gotoff_g1", 0,
2579 0, /* adr_type */
2580 0,
2581 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2582 0,
2583 0,
2584 0},
2585
2586 /* 15 bit offset into the page containing GOT entry for that symbol. */
2587 {"gotoff_lo15", 0,
2588 0, /* adr_type */
2589 0,
2590 0,
2591 0,
2592 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2593 0},
2594
2595 /* Get to the page containing GOT TLS entry for a symbol */
2596 {"gottprel_g0_nc", 0,
2597 0, /* adr_type */
2598 0,
2599 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2600 0,
2601 0,
2602 0},
2603
2604 /* Get to the page containing GOT TLS entry for a symbol */
2605 {"gottprel_g1", 0,
2606 0, /* adr_type */
2607 0,
2608 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2609 0,
2610 0,
2611 0},
2612
2613 /* Get to the page containing GOT TLS entry for a symbol */
2614 {"tlsgd", 0,
2615 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2616 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2617 0,
2618 0,
2619 0,
2620 0},
2621
2622 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2623 {"tlsgd_lo12", 0,
2624 0, /* adr_type */
2625 0,
2626 0,
2627 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2628 0,
2629 0},
2630
2631 /* Lower 16 bits address/value: MOVk. */
2632 {"tlsgd_g0_nc", 0,
2633 0, /* adr_type */
2634 0,
2635 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2636 0,
2637 0,
2638 0},
2639
2640 /* Most significant bits 16-31 of address/value: MOVZ. */
2641 {"tlsgd_g1", 0,
2642 0, /* adr_type */
2643 0,
2644 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2645 0,
2646 0,
2647 0},
2648
2649 /* Get to the page containing GOT TLS entry for a symbol */
2650 {"tlsdesc", 0,
2651 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2652 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2653 0,
2654 0,
2655 0,
2656 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2657
2658 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2659 {"tlsdesc_lo12", 0,
2660 0, /* adr_type */
2661 0,
2662 0,
2663 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2664 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2665 0},
2666
2667 /* Get to the page containing GOT TLS entry for a symbol.
2668 The same as GD, we allocate two consecutive GOT slots
2669 for module index and module offset, the only difference
2670 with GD is the module offset should be intialized to
2671 zero without any outstanding runtime relocation. */
2672 {"tlsldm", 0,
2673 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2674 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2675 0,
2676 0,
2677 0,
2678 0},
2679
2680 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2681 {"tlsldm_lo12_nc", 0,
2682 0, /* adr_type */
2683 0,
2684 0,
2685 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2686 0,
2687 0},
2688
2689 /* 12 bit offset into the module TLS base address. */
2690 {"dtprel_lo12", 0,
2691 0, /* adr_type */
2692 0,
2693 0,
2694 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2695 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2696 0},
2697
2698 /* Same as dtprel_lo12, no overflow check. */
2699 {"dtprel_lo12_nc", 0,
2700 0, /* adr_type */
2701 0,
2702 0,
2703 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2704 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2705 0},
2706
2707 /* bits[23:12] of offset to the module TLS base address. */
2708 {"dtprel_hi12", 0,
2709 0, /* adr_type */
2710 0,
2711 0,
2712 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2713 0,
2714 0},
2715
2716 /* bits[15:0] of offset to the module TLS base address. */
2717 {"dtprel_g0", 0,
2718 0, /* adr_type */
2719 0,
2720 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2721 0,
2722 0,
2723 0},
2724
2725 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2726 {"dtprel_g0_nc", 0,
2727 0, /* adr_type */
2728 0,
2729 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2730 0,
2731 0,
2732 0},
2733
2734 /* bits[31:16] of offset to the module TLS base address. */
2735 {"dtprel_g1", 0,
2736 0, /* adr_type */
2737 0,
2738 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2739 0,
2740 0,
2741 0},
2742
2743 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2744 {"dtprel_g1_nc", 0,
2745 0, /* adr_type */
2746 0,
2747 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2748 0,
2749 0,
2750 0},
2751
2752 /* bits[47:32] of offset to the module TLS base address. */
2753 {"dtprel_g2", 0,
2754 0, /* adr_type */
2755 0,
2756 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2757 0,
2758 0,
2759 0},
2760
2761 /* Lower 16 bit offset into GOT entry for a symbol */
2762 {"tlsdesc_off_g0_nc", 0,
2763 0, /* adr_type */
2764 0,
2765 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2766 0,
2767 0,
2768 0},
2769
2770 /* Higher 16 bit offset into GOT entry for a symbol */
2771 {"tlsdesc_off_g1", 0,
2772 0, /* adr_type */
2773 0,
2774 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2775 0,
2776 0,
2777 0},
2778
2779 /* Get to the page containing GOT TLS entry for a symbol */
2780 {"gottprel", 0,
2781 0, /* adr_type */
2782 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2783 0,
2784 0,
2785 0,
2786 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2787
2788 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2789 {"gottprel_lo12", 0,
2790 0, /* adr_type */
2791 0,
2792 0,
2793 0,
2794 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2795 0},
2796
2797 /* Get tp offset for a symbol. */
2798 {"tprel", 0,
2799 0, /* adr_type */
2800 0,
2801 0,
2802 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2803 0,
2804 0},
2805
2806 /* Get tp offset for a symbol. */
2807 {"tprel_lo12", 0,
2808 0, /* adr_type */
2809 0,
2810 0,
2811 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2812 0,
2813 0},
2814
2815 /* Get tp offset for a symbol. */
2816 {"tprel_hi12", 0,
2817 0, /* adr_type */
2818 0,
2819 0,
2820 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2821 0,
2822 0},
2823
2824 /* Get tp offset for a symbol. */
2825 {"tprel_lo12_nc", 0,
2826 0, /* adr_type */
2827 0,
2828 0,
2829 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2830 0,
2831 0},
2832
2833 /* Most significant bits 32-47 of address/value: MOVZ. */
2834 {"tprel_g2", 0,
2835 0, /* adr_type */
2836 0,
2837 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2838 0,
2839 0,
2840 0},
2841
2842 /* Most significant bits 16-31 of address/value: MOVZ. */
2843 {"tprel_g1", 0,
2844 0, /* adr_type */
2845 0,
2846 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2847 0,
2848 0,
2849 0},
2850
2851 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2852 {"tprel_g1_nc", 0,
2853 0, /* adr_type */
2854 0,
2855 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2856 0,
2857 0,
2858 0},
2859
2860 /* Most significant bits 0-15 of address/value: MOVZ. */
2861 {"tprel_g0", 0,
2862 0, /* adr_type */
2863 0,
2864 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2865 0,
2866 0,
2867 0},
2868
2869 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2870 {"tprel_g0_nc", 0,
2871 0, /* adr_type */
2872 0,
2873 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2874 0,
2875 0,
2876 0},
2877
2878 /* 15bit offset from got entry to base address of GOT table. */
2879 {"gotpage_lo15", 0,
2880 0,
2881 0,
2882 0,
2883 0,
2884 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2885 0},
2886
2887 /* 14bit offset from got entry to base address of GOT table. */
2888 {"gotpage_lo14", 0,
2889 0,
2890 0,
2891 0,
2892 0,
2893 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2894 0},
2895 };
2896
2897 /* Given the address of a pointer pointing to the textual name of a
2898 relocation as may appear in assembler source, attempt to find its
2899 details in reloc_table. The pointer will be updated to the character
2900 after the trailing colon. On failure, NULL will be returned;
2901 otherwise return the reloc_table_entry. */
2902
2903 static struct reloc_table_entry *
2904 find_reloc_table_entry (char **str)
2905 {
2906 unsigned int i;
2907 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2908 {
2909 int length = strlen (reloc_table[i].name);
2910
2911 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2912 && (*str)[length] == ':')
2913 {
2914 *str += (length + 1);
2915 return &reloc_table[i];
2916 }
2917 }
2918
2919 return NULL;
2920 }
2921
2922 /* Mode argument to parse_shift and parser_shifter_operand. */
2923 enum parse_shift_mode
2924 {
2925 SHIFTED_NONE, /* no shifter allowed */
2926 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2927 "#imm{,lsl #n}" */
2928 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2929 "#imm" */
2930 SHIFTED_LSL, /* bare "lsl #n" */
2931 SHIFTED_MUL, /* bare "mul #n" */
2932 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2933 SHIFTED_MUL_VL, /* "mul vl" */
2934 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2935 };
2936
2937 /* Parse a <shift> operator on an AArch64 data processing instruction.
2938 Return TRUE on success; otherwise return FALSE. */
2939 static bfd_boolean
2940 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2941 {
2942 const struct aarch64_name_value_pair *shift_op;
2943 enum aarch64_modifier_kind kind;
2944 expressionS exp;
2945 int exp_has_prefix;
2946 char *s = *str;
2947 char *p = s;
2948
2949 for (p = *str; ISALPHA (*p); p++)
2950 ;
2951
2952 if (p == *str)
2953 {
2954 set_syntax_error (_("shift expression expected"));
2955 return FALSE;
2956 }
2957
2958 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2959
2960 if (shift_op == NULL)
2961 {
2962 set_syntax_error (_("shift operator expected"));
2963 return FALSE;
2964 }
2965
2966 kind = aarch64_get_operand_modifier (shift_op);
2967
2968 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2969 {
2970 set_syntax_error (_("invalid use of 'MSL'"));
2971 return FALSE;
2972 }
2973
2974 if (kind == AARCH64_MOD_MUL
2975 && mode != SHIFTED_MUL
2976 && mode != SHIFTED_MUL_VL)
2977 {
2978 set_syntax_error (_("invalid use of 'MUL'"));
2979 return FALSE;
2980 }
2981
2982 switch (mode)
2983 {
2984 case SHIFTED_LOGIC_IMM:
2985 if (aarch64_extend_operator_p (kind) == TRUE)
2986 {
2987 set_syntax_error (_("extending shift is not permitted"));
2988 return FALSE;
2989 }
2990 break;
2991
2992 case SHIFTED_ARITH_IMM:
2993 if (kind == AARCH64_MOD_ROR)
2994 {
2995 set_syntax_error (_("'ROR' shift is not permitted"));
2996 return FALSE;
2997 }
2998 break;
2999
3000 case SHIFTED_LSL:
3001 if (kind != AARCH64_MOD_LSL)
3002 {
3003 set_syntax_error (_("only 'LSL' shift is permitted"));
3004 return FALSE;
3005 }
3006 break;
3007
3008 case SHIFTED_MUL:
3009 if (kind != AARCH64_MOD_MUL)
3010 {
3011 set_syntax_error (_("only 'MUL' is permitted"));
3012 return FALSE;
3013 }
3014 break;
3015
3016 case SHIFTED_MUL_VL:
3017 /* "MUL VL" consists of two separate tokens. Require the first
3018 token to be "MUL" and look for a following "VL". */
3019 if (kind == AARCH64_MOD_MUL)
3020 {
3021 skip_whitespace (p);
3022 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3023 {
3024 p += 2;
3025 kind = AARCH64_MOD_MUL_VL;
3026 break;
3027 }
3028 }
3029 set_syntax_error (_("only 'MUL VL' is permitted"));
3030 return FALSE;
3031
3032 case SHIFTED_REG_OFFSET:
3033 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3034 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3035 {
3036 set_fatal_syntax_error
3037 (_("invalid shift for the register offset addressing mode"));
3038 return FALSE;
3039 }
3040 break;
3041
3042 case SHIFTED_LSL_MSL:
3043 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3044 {
3045 set_syntax_error (_("invalid shift operator"));
3046 return FALSE;
3047 }
3048 break;
3049
3050 default:
3051 abort ();
3052 }
3053
3054 /* Whitespace can appear here if the next thing is a bare digit. */
3055 skip_whitespace (p);
3056
3057 /* Parse shift amount. */
3058 exp_has_prefix = 0;
3059 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3060 exp.X_op = O_absent;
3061 else
3062 {
3063 if (is_immediate_prefix (*p))
3064 {
3065 p++;
3066 exp_has_prefix = 1;
3067 }
3068 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3069 }
3070 if (kind == AARCH64_MOD_MUL_VL)
3071 /* For consistency, give MUL VL the same shift amount as an implicit
3072 MUL #1. */
3073 operand->shifter.amount = 1;
3074 else if (exp.X_op == O_absent)
3075 {
3076 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
3077 {
3078 set_syntax_error (_("missing shift amount"));
3079 return FALSE;
3080 }
3081 operand->shifter.amount = 0;
3082 }
3083 else if (exp.X_op != O_constant)
3084 {
3085 set_syntax_error (_("constant shift amount required"));
3086 return FALSE;
3087 }
3088 /* For parsing purposes, MUL #n has no inherent range. The range
3089 depends on the operand and will be checked by operand-specific
3090 routines. */
3091 else if (kind != AARCH64_MOD_MUL
3092 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3093 {
3094 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3095 return FALSE;
3096 }
3097 else
3098 {
3099 operand->shifter.amount = exp.X_add_number;
3100 operand->shifter.amount_present = 1;
3101 }
3102
3103 operand->shifter.operator_present = 1;
3104 operand->shifter.kind = kind;
3105
3106 *str = p;
3107 return TRUE;
3108 }
3109
3110 /* Parse a <shifter_operand> for a data processing instruction:
3111
3112 #<immediate>
3113 #<immediate>, LSL #imm
3114
3115 Validation of immediate operands is deferred to md_apply_fix.
3116
3117 Return TRUE on success; otherwise return FALSE. */
3118
3119 static bfd_boolean
3120 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3121 enum parse_shift_mode mode)
3122 {
3123 char *p;
3124
3125 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3126 return FALSE;
3127
3128 p = *str;
3129
3130 /* Accept an immediate expression. */
3131 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3132 return FALSE;
3133
3134 /* Accept optional LSL for arithmetic immediate values. */
3135 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3136 if (! parse_shift (&p, operand, SHIFTED_LSL))
3137 return FALSE;
3138
3139 /* Not accept any shifter for logical immediate values. */
3140 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3141 && parse_shift (&p, operand, mode))
3142 {
3143 set_syntax_error (_("unexpected shift operator"));
3144 return FALSE;
3145 }
3146
3147 *str = p;
3148 return TRUE;
3149 }
3150
3151 /* Parse a <shifter_operand> for a data processing instruction:
3152
3153 <Rm>
3154 <Rm>, <shift>
3155 #<immediate>
3156 #<immediate>, LSL #imm
3157
3158 where <shift> is handled by parse_shift above, and the last two
3159 cases are handled by the function above.
3160
3161 Validation of immediate operands is deferred to md_apply_fix.
3162
3163 Return TRUE on success; otherwise return FALSE. */
3164
3165 static bfd_boolean
3166 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3167 enum parse_shift_mode mode)
3168 {
3169 const reg_entry *reg;
3170 aarch64_opnd_qualifier_t qualifier;
3171 enum aarch64_operand_class opd_class
3172 = aarch64_get_operand_class (operand->type);
3173
3174 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3175 if (reg)
3176 {
3177 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3178 {
3179 set_syntax_error (_("unexpected register in the immediate operand"));
3180 return FALSE;
3181 }
3182
3183 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3184 {
3185 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3186 return FALSE;
3187 }
3188
3189 operand->reg.regno = reg->number;
3190 operand->qualifier = qualifier;
3191
3192 /* Accept optional shift operation on register. */
3193 if (! skip_past_comma (str))
3194 return TRUE;
3195
3196 if (! parse_shift (str, operand, mode))
3197 return FALSE;
3198
3199 return TRUE;
3200 }
3201 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3202 {
3203 set_syntax_error
3204 (_("integer register expected in the extended/shifted operand "
3205 "register"));
3206 return FALSE;
3207 }
3208
3209 /* We have a shifted immediate variable. */
3210 return parse_shifter_operand_imm (str, operand, mode);
3211 }
3212
3213 /* Return TRUE on success; return FALSE otherwise. */
3214
3215 static bfd_boolean
3216 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3217 enum parse_shift_mode mode)
3218 {
3219 char *p = *str;
3220
3221 /* Determine if we have the sequence of characters #: or just :
3222 coming next. If we do, then we check for a :rello: relocation
3223 modifier. If we don't, punt the whole lot to
3224 parse_shifter_operand. */
3225
3226 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3227 {
3228 struct reloc_table_entry *entry;
3229
3230 if (p[0] == '#')
3231 p += 2;
3232 else
3233 p++;
3234 *str = p;
3235
3236 /* Try to parse a relocation. Anything else is an error. */
3237 if (!(entry = find_reloc_table_entry (str)))
3238 {
3239 set_syntax_error (_("unknown relocation modifier"));
3240 return FALSE;
3241 }
3242
3243 if (entry->add_type == 0)
3244 {
3245 set_syntax_error
3246 (_("this relocation modifier is not allowed on this instruction"));
3247 return FALSE;
3248 }
3249
3250 /* Save str before we decompose it. */
3251 p = *str;
3252
3253 /* Next, we parse the expression. */
3254 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3255 return FALSE;
3256
3257 /* Record the relocation type (use the ADD variant here). */
3258 inst.reloc.type = entry->add_type;
3259 inst.reloc.pc_rel = entry->pc_rel;
3260
3261 /* If str is empty, we've reached the end, stop here. */
3262 if (**str == '\0')
3263 return TRUE;
3264
3265 /* Otherwise, we have a shifted reloc modifier, so rewind to
3266 recover the variable name and continue parsing for the shifter. */
3267 *str = p;
3268 return parse_shifter_operand_imm (str, operand, mode);
3269 }
3270
3271 return parse_shifter_operand (str, operand, mode);
3272 }
3273
3274 /* Parse all forms of an address expression. Information is written
3275 to *OPERAND and/or inst.reloc.
3276
3277 The A64 instruction set has the following addressing modes:
3278
3279 Offset
3280 [base] // in SIMD ld/st structure
3281 [base{,#0}] // in ld/st exclusive
3282 [base{,#imm}]
3283 [base,Xm{,LSL #imm}]
3284 [base,Xm,SXTX {#imm}]
3285 [base,Wm,(S|U)XTW {#imm}]
3286 Pre-indexed
3287 [base,#imm]!
3288 Post-indexed
3289 [base],#imm
3290 [base],Xm // in SIMD ld/st structure
3291 PC-relative (literal)
3292 label
3293 SVE:
3294 [base,#imm,MUL VL]
3295 [base,Zm.D{,LSL #imm}]
3296 [base,Zm.S,(S|U)XTW {#imm}]
3297 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3298 [Zn.S,#imm]
3299 [Zn.D,#imm]
3300 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3301 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3302 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3303
3304 (As a convenience, the notation "=immediate" is permitted in conjunction
3305 with the pc-relative literal load instructions to automatically place an
3306 immediate value or symbolic address in a nearby literal pool and generate
3307 a hidden label which references it.)
3308
3309 Upon a successful parsing, the address structure in *OPERAND will be
3310 filled in the following way:
3311
3312 .base_regno = <base>
3313 .offset.is_reg // 1 if the offset is a register
3314 .offset.imm = <imm>
3315 .offset.regno = <Rm>
3316
3317 For different addressing modes defined in the A64 ISA:
3318
3319 Offset
3320 .pcrel=0; .preind=1; .postind=0; .writeback=0
3321 Pre-indexed
3322 .pcrel=0; .preind=1; .postind=0; .writeback=1
3323 Post-indexed
3324 .pcrel=0; .preind=0; .postind=1; .writeback=1
3325 PC-relative (literal)
3326 .pcrel=1; .preind=1; .postind=0; .writeback=0
3327
3328 The shift/extension information, if any, will be stored in .shifter.
3329 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3330 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3331 corresponding register.
3332
3333 BASE_TYPE says which types of base register should be accepted and
3334 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3335 is the type of shifter that is allowed for immediate offsets,
3336 or SHIFTED_NONE if none.
3337
3338 In all other respects, it is the caller's responsibility to check
3339 for addressing modes not supported by the instruction, and to set
3340 inst.reloc.type. */
3341
3342 static bfd_boolean
3343 parse_address_main (char **str, aarch64_opnd_info *operand,
3344 aarch64_opnd_qualifier_t *base_qualifier,
3345 aarch64_opnd_qualifier_t *offset_qualifier,
3346 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3347 enum parse_shift_mode imm_shift_mode)
3348 {
3349 char *p = *str;
3350 const reg_entry *reg;
3351 expressionS *exp = &inst.reloc.exp;
3352
3353 *base_qualifier = AARCH64_OPND_QLF_NIL;
3354 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3355 if (! skip_past_char (&p, '['))
3356 {
3357 /* =immediate or label. */
3358 operand->addr.pcrel = 1;
3359 operand->addr.preind = 1;
3360
3361 /* #:<reloc_op>:<symbol> */
3362 skip_past_char (&p, '#');
3363 if (skip_past_char (&p, ':'))
3364 {
3365 bfd_reloc_code_real_type ty;
3366 struct reloc_table_entry *entry;
3367
3368 /* Try to parse a relocation modifier. Anything else is
3369 an error. */
3370 entry = find_reloc_table_entry (&p);
3371 if (! entry)
3372 {
3373 set_syntax_error (_("unknown relocation modifier"));
3374 return FALSE;
3375 }
3376
3377 switch (operand->type)
3378 {
3379 case AARCH64_OPND_ADDR_PCREL21:
3380 /* adr */
3381 ty = entry->adr_type;
3382 break;
3383
3384 default:
3385 ty = entry->ld_literal_type;
3386 break;
3387 }
3388
3389 if (ty == 0)
3390 {
3391 set_syntax_error
3392 (_("this relocation modifier is not allowed on this "
3393 "instruction"));
3394 return FALSE;
3395 }
3396
3397 /* #:<reloc_op>: */
3398 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3399 {
3400 set_syntax_error (_("invalid relocation expression"));
3401 return FALSE;
3402 }
3403
3404 /* #:<reloc_op>:<expr> */
3405 /* Record the relocation type. */
3406 inst.reloc.type = ty;
3407 inst.reloc.pc_rel = entry->pc_rel;
3408 }
3409 else
3410 {
3411
3412 if (skip_past_char (&p, '='))
3413 /* =immediate; need to generate the literal in the literal pool. */
3414 inst.gen_lit_pool = 1;
3415
3416 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3417 {
3418 set_syntax_error (_("invalid address"));
3419 return FALSE;
3420 }
3421 }
3422
3423 *str = p;
3424 return TRUE;
3425 }
3426
3427 /* [ */
3428
3429 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3430 if (!reg || !aarch64_check_reg_type (reg, base_type))
3431 {
3432 set_syntax_error (_(get_reg_expected_msg (base_type)));
3433 return FALSE;
3434 }
3435 operand->addr.base_regno = reg->number;
3436
3437 /* [Xn */
3438 if (skip_past_comma (&p))
3439 {
3440 /* [Xn, */
3441 operand->addr.preind = 1;
3442
3443 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3444 if (reg)
3445 {
3446 if (!aarch64_check_reg_type (reg, offset_type))
3447 {
3448 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3449 return FALSE;
3450 }
3451
3452 /* [Xn,Rm */
3453 operand->addr.offset.regno = reg->number;
3454 operand->addr.offset.is_reg = 1;
3455 /* Shifted index. */
3456 if (skip_past_comma (&p))
3457 {
3458 /* [Xn,Rm, */
3459 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3460 /* Use the diagnostics set in parse_shift, so not set new
3461 error message here. */
3462 return FALSE;
3463 }
3464 /* We only accept:
3465 [base,Xm{,LSL #imm}]
3466 [base,Xm,SXTX {#imm}]
3467 [base,Wm,(S|U)XTW {#imm}] */
3468 if (operand->shifter.kind == AARCH64_MOD_NONE
3469 || operand->shifter.kind == AARCH64_MOD_LSL
3470 || operand->shifter.kind == AARCH64_MOD_SXTX)
3471 {
3472 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3473 {
3474 set_syntax_error (_("invalid use of 32-bit register offset"));
3475 return FALSE;
3476 }
3477 if (aarch64_get_qualifier_esize (*base_qualifier)
3478 != aarch64_get_qualifier_esize (*offset_qualifier))
3479 {
3480 set_syntax_error (_("offset has different size from base"));
3481 return FALSE;
3482 }
3483 }
3484 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3485 {
3486 set_syntax_error (_("invalid use of 64-bit register offset"));
3487 return FALSE;
3488 }
3489 }
3490 else
3491 {
3492 /* [Xn,#:<reloc_op>:<symbol> */
3493 skip_past_char (&p, '#');
3494 if (skip_past_char (&p, ':'))
3495 {
3496 struct reloc_table_entry *entry;
3497
3498 /* Try to parse a relocation modifier. Anything else is
3499 an error. */
3500 if (!(entry = find_reloc_table_entry (&p)))
3501 {
3502 set_syntax_error (_("unknown relocation modifier"));
3503 return FALSE;
3504 }
3505
3506 if (entry->ldst_type == 0)
3507 {
3508 set_syntax_error
3509 (_("this relocation modifier is not allowed on this "
3510 "instruction"));
3511 return FALSE;
3512 }
3513
3514 /* [Xn,#:<reloc_op>: */
3515 /* We now have the group relocation table entry corresponding to
3516 the name in the assembler source. Next, we parse the
3517 expression. */
3518 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3519 {
3520 set_syntax_error (_("invalid relocation expression"));
3521 return FALSE;
3522 }
3523
3524 /* [Xn,#:<reloc_op>:<expr> */
3525 /* Record the load/store relocation type. */
3526 inst.reloc.type = entry->ldst_type;
3527 inst.reloc.pc_rel = entry->pc_rel;
3528 }
3529 else
3530 {
3531 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3532 {
3533 set_syntax_error (_("invalid expression in the address"));
3534 return FALSE;
3535 }
3536 /* [Xn,<expr> */
3537 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3538 /* [Xn,<expr>,<shifter> */
3539 if (! parse_shift (&p, operand, imm_shift_mode))
3540 return FALSE;
3541 }
3542 }
3543 }
3544
3545 if (! skip_past_char (&p, ']'))
3546 {
3547 set_syntax_error (_("']' expected"));
3548 return FALSE;
3549 }
3550
3551 if (skip_past_char (&p, '!'))
3552 {
3553 if (operand->addr.preind && operand->addr.offset.is_reg)
3554 {
3555 set_syntax_error (_("register offset not allowed in pre-indexed "
3556 "addressing mode"));
3557 return FALSE;
3558 }
3559 /* [Xn]! */
3560 operand->addr.writeback = 1;
3561 }
3562 else if (skip_past_comma (&p))
3563 {
3564 /* [Xn], */
3565 operand->addr.postind = 1;
3566 operand->addr.writeback = 1;
3567
3568 if (operand->addr.preind)
3569 {
3570 set_syntax_error (_("cannot combine pre- and post-indexing"));
3571 return FALSE;
3572 }
3573
3574 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3575 if (reg)
3576 {
3577 /* [Xn],Xm */
3578 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3579 {
3580 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3581 return FALSE;
3582 }
3583
3584 operand->addr.offset.regno = reg->number;
3585 operand->addr.offset.is_reg = 1;
3586 }
3587 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3588 {
3589 /* [Xn],#expr */
3590 set_syntax_error (_("invalid expression in the address"));
3591 return FALSE;
3592 }
3593 }
3594
3595 /* If at this point neither .preind nor .postind is set, we have a
3596 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3597 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3598 {
3599 if (operand->addr.writeback)
3600 {
3601 /* Reject [Rn]! */
3602 set_syntax_error (_("missing offset in the pre-indexed address"));
3603 return FALSE;
3604 }
3605 operand->addr.preind = 1;
3606 inst.reloc.exp.X_op = O_constant;
3607 inst.reloc.exp.X_add_number = 0;
3608 }
3609
3610 *str = p;
3611 return TRUE;
3612 }
3613
3614 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3615 on success. */
3616 static bfd_boolean
3617 parse_address (char **str, aarch64_opnd_info *operand)
3618 {
3619 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3620 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3621 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3622 }
3623
3624 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3625 The arguments have the same meaning as for parse_address_main.
3626 Return TRUE on success. */
3627 static bfd_boolean
3628 parse_sve_address (char **str, aarch64_opnd_info *operand,
3629 aarch64_opnd_qualifier_t *base_qualifier,
3630 aarch64_opnd_qualifier_t *offset_qualifier)
3631 {
3632 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3633 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3634 SHIFTED_MUL_VL);
3635 }
3636
3637 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3638 Return TRUE on success; otherwise return FALSE. */
3639 static bfd_boolean
3640 parse_half (char **str, int *internal_fixup_p)
3641 {
3642 char *p = *str;
3643
3644 skip_past_char (&p, '#');
3645
3646 gas_assert (internal_fixup_p);
3647 *internal_fixup_p = 0;
3648
3649 if (*p == ':')
3650 {
3651 struct reloc_table_entry *entry;
3652
3653 /* Try to parse a relocation. Anything else is an error. */
3654 ++p;
3655 if (!(entry = find_reloc_table_entry (&p)))
3656 {
3657 set_syntax_error (_("unknown relocation modifier"));
3658 return FALSE;
3659 }
3660
3661 if (entry->movw_type == 0)
3662 {
3663 set_syntax_error
3664 (_("this relocation modifier is not allowed on this instruction"));
3665 return FALSE;
3666 }
3667
3668 inst.reloc.type = entry->movw_type;
3669 }
3670 else
3671 *internal_fixup_p = 1;
3672
3673 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3674 return FALSE;
3675
3676 *str = p;
3677 return TRUE;
3678 }
3679
3680 /* Parse an operand for an ADRP instruction:
3681 ADRP <Xd>, <label>
3682 Return TRUE on success; otherwise return FALSE. */
3683
3684 static bfd_boolean
3685 parse_adrp (char **str)
3686 {
3687 char *p;
3688
3689 p = *str;
3690 if (*p == ':')
3691 {
3692 struct reloc_table_entry *entry;
3693
3694 /* Try to parse a relocation. Anything else is an error. */
3695 ++p;
3696 if (!(entry = find_reloc_table_entry (&p)))
3697 {
3698 set_syntax_error (_("unknown relocation modifier"));
3699 return FALSE;
3700 }
3701
3702 if (entry->adrp_type == 0)
3703 {
3704 set_syntax_error
3705 (_("this relocation modifier is not allowed on this instruction"));
3706 return FALSE;
3707 }
3708
3709 inst.reloc.type = entry->adrp_type;
3710 }
3711 else
3712 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3713
3714 inst.reloc.pc_rel = 1;
3715
3716 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3717 return FALSE;
3718
3719 *str = p;
3720 return TRUE;
3721 }
3722
3723 /* Miscellaneous. */
3724
3725 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3726 of SIZE tokens in which index I gives the token for field value I,
3727 or is null if field value I is invalid. REG_TYPE says which register
3728 names should be treated as registers rather than as symbolic immediates.
3729
3730 Return true on success, moving *STR past the operand and storing the
3731 field value in *VAL. */
3732
3733 static int
3734 parse_enum_string (char **str, int64_t *val, const char *const *array,
3735 size_t size, aarch64_reg_type reg_type)
3736 {
3737 expressionS exp;
3738 char *p, *q;
3739 size_t i;
3740
3741 /* Match C-like tokens. */
3742 p = q = *str;
3743 while (ISALNUM (*q))
3744 q++;
3745
3746 for (i = 0; i < size; ++i)
3747 if (array[i]
3748 && strncasecmp (array[i], p, q - p) == 0
3749 && array[i][q - p] == 0)
3750 {
3751 *val = i;
3752 *str = q;
3753 return TRUE;
3754 }
3755
3756 if (!parse_immediate_expression (&p, &exp, reg_type))
3757 return FALSE;
3758
3759 if (exp.X_op == O_constant
3760 && (uint64_t) exp.X_add_number < size)
3761 {
3762 *val = exp.X_add_number;
3763 *str = p;
3764 return TRUE;
3765 }
3766
3767 /* Use the default error for this operand. */
3768 return FALSE;
3769 }
3770
3771 /* Parse an option for a preload instruction. Returns the encoding for the
3772 option, or PARSE_FAIL. */
3773
3774 static int
3775 parse_pldop (char **str)
3776 {
3777 char *p, *q;
3778 const struct aarch64_name_value_pair *o;
3779
3780 p = q = *str;
3781 while (ISALNUM (*q))
3782 q++;
3783
3784 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3785 if (!o)
3786 return PARSE_FAIL;
3787
3788 *str = q;
3789 return o->value;
3790 }
3791
3792 /* Parse an option for a barrier instruction. Returns the encoding for the
3793 option, or PARSE_FAIL. */
3794
3795 static int
3796 parse_barrier (char **str)
3797 {
3798 char *p, *q;
3799 const asm_barrier_opt *o;
3800
3801 p = q = *str;
3802 while (ISALPHA (*q))
3803 q++;
3804
3805 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3806 if (!o)
3807 return PARSE_FAIL;
3808
3809 *str = q;
3810 return o->value;
3811 }
3812
3813 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3814 return 0 if successful. Otherwise return PARSE_FAIL. */
3815
3816 static int
3817 parse_barrier_psb (char **str,
3818 const struct aarch64_name_value_pair ** hint_opt)
3819 {
3820 char *p, *q;
3821 const struct aarch64_name_value_pair *o;
3822
3823 p = q = *str;
3824 while (ISALPHA (*q))
3825 q++;
3826
3827 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3828 if (!o)
3829 {
3830 set_fatal_syntax_error
3831 ( _("unknown or missing option to PSB"));
3832 return PARSE_FAIL;
3833 }
3834
3835 if (o->value != 0x11)
3836 {
3837 /* PSB only accepts option name 'CSYNC'. */
3838 set_syntax_error
3839 (_("the specified option is not accepted for PSB"));
3840 return PARSE_FAIL;
3841 }
3842
3843 *str = q;
3844 *hint_opt = o;
3845 return 0;
3846 }
3847
3848 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3849 Returns the encoding for the option, or PARSE_FAIL.
3850
3851 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3852 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3853
3854 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3855 field, otherwise as a system register.
3856 */
3857
3858 static int
3859 parse_sys_reg (char **str, struct hash_control *sys_regs,
3860 int imple_defined_p, int pstatefield_p)
3861 {
3862 char *p, *q;
3863 char buf[32];
3864 const aarch64_sys_reg *o;
3865 int value;
3866
3867 p = buf;
3868 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3869 if (p < buf + 31)
3870 *p++ = TOLOWER (*q);
3871 *p = '\0';
3872 /* Assert that BUF be large enough. */
3873 gas_assert (p - buf == q - *str);
3874
3875 o = hash_find (sys_regs, buf);
3876 if (!o)
3877 {
3878 if (!imple_defined_p)
3879 return PARSE_FAIL;
3880 else
3881 {
3882 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3883 unsigned int op0, op1, cn, cm, op2;
3884
3885 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3886 != 5)
3887 return PARSE_FAIL;
3888 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3889 return PARSE_FAIL;
3890 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3891 }
3892 }
3893 else
3894 {
3895 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3896 as_bad (_("selected processor does not support PSTATE field "
3897 "name '%s'"), buf);
3898 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3899 as_bad (_("selected processor does not support system register "
3900 "name '%s'"), buf);
3901 if (aarch64_sys_reg_deprecated_p (o))
3902 as_warn (_("system register name '%s' is deprecated and may be "
3903 "removed in a future release"), buf);
3904 value = o->value;
3905 }
3906
3907 *str = q;
3908 return value;
3909 }
3910
3911 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3912 for the option, or NULL. */
3913
3914 static const aarch64_sys_ins_reg *
3915 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3916 {
3917 char *p, *q;
3918 char buf[32];
3919 const aarch64_sys_ins_reg *o;
3920
3921 p = buf;
3922 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3923 if (p < buf + 31)
3924 *p++ = TOLOWER (*q);
3925 *p = '\0';
3926
3927 o = hash_find (sys_ins_regs, buf);
3928 if (!o)
3929 return NULL;
3930
3931 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3932 as_bad (_("selected processor does not support system register "
3933 "name '%s'"), buf);
3934
3935 *str = q;
3936 return o;
3937 }
3938 \f
3939 #define po_char_or_fail(chr) do { \
3940 if (! skip_past_char (&str, chr)) \
3941 goto failure; \
3942 } while (0)
3943
3944 #define po_reg_or_fail(regtype) do { \
3945 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3946 if (val == PARSE_FAIL) \
3947 { \
3948 set_default_error (); \
3949 goto failure; \
3950 } \
3951 } while (0)
3952
3953 #define po_int_reg_or_fail(reg_type) do { \
3954 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
3955 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
3956 { \
3957 set_default_error (); \
3958 goto failure; \
3959 } \
3960 info->reg.regno = reg->number; \
3961 info->qualifier = qualifier; \
3962 } while (0)
3963
3964 #define po_imm_nc_or_fail() do { \
3965 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
3966 goto failure; \
3967 } while (0)
3968
3969 #define po_imm_or_fail(min, max) do { \
3970 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
3971 goto failure; \
3972 if (val < min || val > max) \
3973 { \
3974 set_fatal_syntax_error (_("immediate value out of range "\
3975 #min " to "#max)); \
3976 goto failure; \
3977 } \
3978 } while (0)
3979
3980 #define po_enum_or_fail(array) do { \
3981 if (!parse_enum_string (&str, &val, array, \
3982 ARRAY_SIZE (array), imm_reg_type)) \
3983 goto failure; \
3984 } while (0)
3985
3986 #define po_misc_or_fail(expr) do { \
3987 if (!expr) \
3988 goto failure; \
3989 } while (0)
3990 \f
3991 /* encode the 12-bit imm field of Add/sub immediate */
3992 static inline uint32_t
3993 encode_addsub_imm (uint32_t imm)
3994 {
3995 return imm << 10;
3996 }
3997
3998 /* encode the shift amount field of Add/sub immediate */
3999 static inline uint32_t
4000 encode_addsub_imm_shift_amount (uint32_t cnt)
4001 {
4002 return cnt << 22;
4003 }
4004
4005
4006 /* encode the imm field of Adr instruction */
4007 static inline uint32_t
4008 encode_adr_imm (uint32_t imm)
4009 {
4010 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4011 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4012 }
4013
4014 /* encode the immediate field of Move wide immediate */
4015 static inline uint32_t
4016 encode_movw_imm (uint32_t imm)
4017 {
4018 return imm << 5;
4019 }
4020
4021 /* encode the 26-bit offset of unconditional branch */
4022 static inline uint32_t
4023 encode_branch_ofs_26 (uint32_t ofs)
4024 {
4025 return ofs & ((1 << 26) - 1);
4026 }
4027
4028 /* encode the 19-bit offset of conditional branch and compare & branch */
4029 static inline uint32_t
4030 encode_cond_branch_ofs_19 (uint32_t ofs)
4031 {
4032 return (ofs & ((1 << 19) - 1)) << 5;
4033 }
4034
4035 /* encode the 19-bit offset of ld literal */
4036 static inline uint32_t
4037 encode_ld_lit_ofs_19 (uint32_t ofs)
4038 {
4039 return (ofs & ((1 << 19) - 1)) << 5;
4040 }
4041
4042 /* Encode the 14-bit offset of test & branch. */
4043 static inline uint32_t
4044 encode_tst_branch_ofs_14 (uint32_t ofs)
4045 {
4046 return (ofs & ((1 << 14) - 1)) << 5;
4047 }
4048
4049 /* Encode the 16-bit imm field of svc/hvc/smc. */
4050 static inline uint32_t
4051 encode_svc_imm (uint32_t imm)
4052 {
4053 return imm << 5;
4054 }
4055
4056 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4057 static inline uint32_t
4058 reencode_addsub_switch_add_sub (uint32_t opcode)
4059 {
4060 return opcode ^ (1 << 30);
4061 }
4062
4063 static inline uint32_t
4064 reencode_movzn_to_movz (uint32_t opcode)
4065 {
4066 return opcode | (1 << 30);
4067 }
4068
4069 static inline uint32_t
4070 reencode_movzn_to_movn (uint32_t opcode)
4071 {
4072 return opcode & ~(1 << 30);
4073 }
4074
4075 /* Overall per-instruction processing. */
4076
4077 /* We need to be able to fix up arbitrary expressions in some statements.
4078 This is so that we can handle symbols that are an arbitrary distance from
4079 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4080 which returns part of an address in a form which will be valid for
4081 a data instruction. We do this by pushing the expression into a symbol
4082 in the expr_section, and creating a fix for that. */
4083
4084 static fixS *
4085 fix_new_aarch64 (fragS * frag,
4086 int where,
4087 short int size, expressionS * exp, int pc_rel, int reloc)
4088 {
4089 fixS *new_fix;
4090
4091 switch (exp->X_op)
4092 {
4093 case O_constant:
4094 case O_symbol:
4095 case O_add:
4096 case O_subtract:
4097 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4098 break;
4099
4100 default:
4101 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4102 pc_rel, reloc);
4103 break;
4104 }
4105 return new_fix;
4106 }
4107 \f
4108 /* Diagnostics on operands errors. */
4109
4110 /* By default, output verbose error message.
4111 Disable the verbose error message by -mno-verbose-error. */
4112 static int verbose_error_p = 1;
4113
4114 #ifdef DEBUG_AARCH64
4115 /* N.B. this is only for the purpose of debugging. */
4116 const char* operand_mismatch_kind_names[] =
4117 {
4118 "AARCH64_OPDE_NIL",
4119 "AARCH64_OPDE_RECOVERABLE",
4120 "AARCH64_OPDE_SYNTAX_ERROR",
4121 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4122 "AARCH64_OPDE_INVALID_VARIANT",
4123 "AARCH64_OPDE_OUT_OF_RANGE",
4124 "AARCH64_OPDE_UNALIGNED",
4125 "AARCH64_OPDE_REG_LIST",
4126 "AARCH64_OPDE_OTHER_ERROR",
4127 };
4128 #endif /* DEBUG_AARCH64 */
4129
4130 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4131
4132 When multiple errors of different kinds are found in the same assembly
4133 line, only the error of the highest severity will be picked up for
4134 issuing the diagnostics. */
4135
4136 static inline bfd_boolean
4137 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4138 enum aarch64_operand_error_kind rhs)
4139 {
4140 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4141 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4142 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4143 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4144 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4145 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4146 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4147 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4148 return lhs > rhs;
4149 }
4150
4151 /* Helper routine to get the mnemonic name from the assembly instruction
4152 line; should only be called for the diagnosis purpose, as there is
4153 string copy operation involved, which may affect the runtime
4154 performance if used in elsewhere. */
4155
4156 static const char*
4157 get_mnemonic_name (const char *str)
4158 {
4159 static char mnemonic[32];
4160 char *ptr;
4161
4162 /* Get the first 15 bytes and assume that the full name is included. */
4163 strncpy (mnemonic, str, 31);
4164 mnemonic[31] = '\0';
4165
4166 /* Scan up to the end of the mnemonic, which must end in white space,
4167 '.', or end of string. */
4168 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4169 ;
4170
4171 *ptr = '\0';
4172
4173 /* Append '...' to the truncated long name. */
4174 if (ptr - mnemonic == 31)
4175 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4176
4177 return mnemonic;
4178 }
4179
4180 static void
4181 reset_aarch64_instruction (aarch64_instruction *instruction)
4182 {
4183 memset (instruction, '\0', sizeof (aarch64_instruction));
4184 instruction->reloc.type = BFD_RELOC_UNUSED;
4185 }
4186
4187 /* Data strutures storing one user error in the assembly code related to
4188 operands. */
4189
4190 struct operand_error_record
4191 {
4192 const aarch64_opcode *opcode;
4193 aarch64_operand_error detail;
4194 struct operand_error_record *next;
4195 };
4196
4197 typedef struct operand_error_record operand_error_record;
4198
4199 struct operand_errors
4200 {
4201 operand_error_record *head;
4202 operand_error_record *tail;
4203 };
4204
4205 typedef struct operand_errors operand_errors;
4206
4207 /* Top-level data structure reporting user errors for the current line of
4208 the assembly code.
4209 The way md_assemble works is that all opcodes sharing the same mnemonic
4210 name are iterated to find a match to the assembly line. In this data
4211 structure, each of the such opcodes will have one operand_error_record
4212 allocated and inserted. In other words, excessive errors related with
4213 a single opcode are disregarded. */
4214 operand_errors operand_error_report;
4215
4216 /* Free record nodes. */
4217 static operand_error_record *free_opnd_error_record_nodes = NULL;
4218
4219 /* Initialize the data structure that stores the operand mismatch
4220 information on assembling one line of the assembly code. */
4221 static void
4222 init_operand_error_report (void)
4223 {
4224 if (operand_error_report.head != NULL)
4225 {
4226 gas_assert (operand_error_report.tail != NULL);
4227 operand_error_report.tail->next = free_opnd_error_record_nodes;
4228 free_opnd_error_record_nodes = operand_error_report.head;
4229 operand_error_report.head = NULL;
4230 operand_error_report.tail = NULL;
4231 return;
4232 }
4233 gas_assert (operand_error_report.tail == NULL);
4234 }
4235
4236 /* Return TRUE if some operand error has been recorded during the
4237 parsing of the current assembly line using the opcode *OPCODE;
4238 otherwise return FALSE. */
4239 static inline bfd_boolean
4240 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4241 {
4242 operand_error_record *record = operand_error_report.head;
4243 return record && record->opcode == opcode;
4244 }
4245
4246 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4247 OPCODE field is initialized with OPCODE.
4248 N.B. only one record for each opcode, i.e. the maximum of one error is
4249 recorded for each instruction template. */
4250
4251 static void
4252 add_operand_error_record (const operand_error_record* new_record)
4253 {
4254 const aarch64_opcode *opcode = new_record->opcode;
4255 operand_error_record* record = operand_error_report.head;
4256
4257 /* The record may have been created for this opcode. If not, we need
4258 to prepare one. */
4259 if (! opcode_has_operand_error_p (opcode))
4260 {
4261 /* Get one empty record. */
4262 if (free_opnd_error_record_nodes == NULL)
4263 {
4264 record = XNEW (operand_error_record);
4265 }
4266 else
4267 {
4268 record = free_opnd_error_record_nodes;
4269 free_opnd_error_record_nodes = record->next;
4270 }
4271 record->opcode = opcode;
4272 /* Insert at the head. */
4273 record->next = operand_error_report.head;
4274 operand_error_report.head = record;
4275 if (operand_error_report.tail == NULL)
4276 operand_error_report.tail = record;
4277 }
4278 else if (record->detail.kind != AARCH64_OPDE_NIL
4279 && record->detail.index <= new_record->detail.index
4280 && operand_error_higher_severity_p (record->detail.kind,
4281 new_record->detail.kind))
4282 {
4283 /* In the case of multiple errors found on operands related with a
4284 single opcode, only record the error of the leftmost operand and
4285 only if the error is of higher severity. */
4286 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4287 " the existing error %s on operand %d",
4288 operand_mismatch_kind_names[new_record->detail.kind],
4289 new_record->detail.index,
4290 operand_mismatch_kind_names[record->detail.kind],
4291 record->detail.index);
4292 return;
4293 }
4294
4295 record->detail = new_record->detail;
4296 }
4297
4298 static inline void
4299 record_operand_error_info (const aarch64_opcode *opcode,
4300 aarch64_operand_error *error_info)
4301 {
4302 operand_error_record record;
4303 record.opcode = opcode;
4304 record.detail = *error_info;
4305 add_operand_error_record (&record);
4306 }
4307
4308 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4309 error message *ERROR, for operand IDX (count from 0). */
4310
4311 static void
4312 record_operand_error (const aarch64_opcode *opcode, int idx,
4313 enum aarch64_operand_error_kind kind,
4314 const char* error)
4315 {
4316 aarch64_operand_error info;
4317 memset(&info, 0, sizeof (info));
4318 info.index = idx;
4319 info.kind = kind;
4320 info.error = error;
4321 record_operand_error_info (opcode, &info);
4322 }
4323
4324 static void
4325 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4326 enum aarch64_operand_error_kind kind,
4327 const char* error, const int *extra_data)
4328 {
4329 aarch64_operand_error info;
4330 info.index = idx;
4331 info.kind = kind;
4332 info.error = error;
4333 info.data[0] = extra_data[0];
4334 info.data[1] = extra_data[1];
4335 info.data[2] = extra_data[2];
4336 record_operand_error_info (opcode, &info);
4337 }
4338
4339 static void
4340 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4341 const char* error, int lower_bound,
4342 int upper_bound)
4343 {
4344 int data[3] = {lower_bound, upper_bound, 0};
4345 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4346 error, data);
4347 }
4348
4349 /* Remove the operand error record for *OPCODE. */
4350 static void ATTRIBUTE_UNUSED
4351 remove_operand_error_record (const aarch64_opcode *opcode)
4352 {
4353 if (opcode_has_operand_error_p (opcode))
4354 {
4355 operand_error_record* record = operand_error_report.head;
4356 gas_assert (record != NULL && operand_error_report.tail != NULL);
4357 operand_error_report.head = record->next;
4358 record->next = free_opnd_error_record_nodes;
4359 free_opnd_error_record_nodes = record;
4360 if (operand_error_report.head == NULL)
4361 {
4362 gas_assert (operand_error_report.tail == record);
4363 operand_error_report.tail = NULL;
4364 }
4365 }
4366 }
4367
4368 /* Given the instruction in *INSTR, return the index of the best matched
4369 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4370
4371 Return -1 if there is no qualifier sequence; return the first match
4372 if there is multiple matches found. */
4373
4374 static int
4375 find_best_match (const aarch64_inst *instr,
4376 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4377 {
4378 int i, num_opnds, max_num_matched, idx;
4379
4380 num_opnds = aarch64_num_of_operands (instr->opcode);
4381 if (num_opnds == 0)
4382 {
4383 DEBUG_TRACE ("no operand");
4384 return -1;
4385 }
4386
4387 max_num_matched = 0;
4388 idx = 0;
4389
4390 /* For each pattern. */
4391 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4392 {
4393 int j, num_matched;
4394 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4395
4396 /* Most opcodes has much fewer patterns in the list. */
4397 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4398 {
4399 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4400 break;
4401 }
4402
4403 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4404 if (*qualifiers == instr->operands[j].qualifier)
4405 ++num_matched;
4406
4407 if (num_matched > max_num_matched)
4408 {
4409 max_num_matched = num_matched;
4410 idx = i;
4411 }
4412 }
4413
4414 DEBUG_TRACE ("return with %d", idx);
4415 return idx;
4416 }
4417
4418 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
4419 corresponding operands in *INSTR. */
4420
4421 static inline void
4422 assign_qualifier_sequence (aarch64_inst *instr,
4423 const aarch64_opnd_qualifier_t *qualifiers)
4424 {
4425 int i = 0;
4426 int num_opnds = aarch64_num_of_operands (instr->opcode);
4427 gas_assert (num_opnds);
4428 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4429 instr->operands[i].qualifier = *qualifiers;
4430 }
4431
4432 /* Print operands for the diagnosis purpose. */
4433
4434 static void
4435 print_operands (char *buf, const aarch64_opcode *opcode,
4436 const aarch64_opnd_info *opnds)
4437 {
4438 int i;
4439
4440 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4441 {
4442 char str[128];
4443
4444 /* We regard the opcode operand info more, however we also look into
4445 the inst->operands to support the disassembling of the optional
4446 operand.
4447 The two operand code should be the same in all cases, apart from
4448 when the operand can be optional. */
4449 if (opcode->operands[i] == AARCH64_OPND_NIL
4450 || opnds[i].type == AARCH64_OPND_NIL)
4451 break;
4452
4453 /* Generate the operand string in STR. */
4454 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4455
4456 /* Delimiter. */
4457 if (str[0] != '\0')
4458 strcat (buf, i == 0 ? " " : ",");
4459
4460 /* Append the operand string. */
4461 strcat (buf, str);
4462 }
4463 }
4464
4465 /* Send to stderr a string as information. */
4466
4467 static void
4468 output_info (const char *format, ...)
4469 {
4470 const char *file;
4471 unsigned int line;
4472 va_list args;
4473
4474 file = as_where (&line);
4475 if (file)
4476 {
4477 if (line != 0)
4478 fprintf (stderr, "%s:%u: ", file, line);
4479 else
4480 fprintf (stderr, "%s: ", file);
4481 }
4482 fprintf (stderr, _("Info: "));
4483 va_start (args, format);
4484 vfprintf (stderr, format, args);
4485 va_end (args);
4486 (void) putc ('\n', stderr);
4487 }
4488
4489 /* Output one operand error record. */
4490
4491 static void
4492 output_operand_error_record (const operand_error_record *record, char *str)
4493 {
4494 const aarch64_operand_error *detail = &record->detail;
4495 int idx = detail->index;
4496 const aarch64_opcode *opcode = record->opcode;
4497 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4498 : AARCH64_OPND_NIL);
4499
4500 switch (detail->kind)
4501 {
4502 case AARCH64_OPDE_NIL:
4503 gas_assert (0);
4504 break;
4505
4506 case AARCH64_OPDE_SYNTAX_ERROR:
4507 case AARCH64_OPDE_RECOVERABLE:
4508 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4509 case AARCH64_OPDE_OTHER_ERROR:
4510 /* Use the prepared error message if there is, otherwise use the
4511 operand description string to describe the error. */
4512 if (detail->error != NULL)
4513 {
4514 if (idx < 0)
4515 as_bad (_("%s -- `%s'"), detail->error, str);
4516 else
4517 as_bad (_("%s at operand %d -- `%s'"),
4518 detail->error, idx + 1, str);
4519 }
4520 else
4521 {
4522 gas_assert (idx >= 0);
4523 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4524 aarch64_get_operand_desc (opd_code), str);
4525 }
4526 break;
4527
4528 case AARCH64_OPDE_INVALID_VARIANT:
4529 as_bad (_("operand mismatch -- `%s'"), str);
4530 if (verbose_error_p)
4531 {
4532 /* We will try to correct the erroneous instruction and also provide
4533 more information e.g. all other valid variants.
4534
4535 The string representation of the corrected instruction and other
4536 valid variants are generated by
4537
4538 1) obtaining the intermediate representation of the erroneous
4539 instruction;
4540 2) manipulating the IR, e.g. replacing the operand qualifier;
4541 3) printing out the instruction by calling the printer functions
4542 shared with the disassembler.
4543
4544 The limitation of this method is that the exact input assembly
4545 line cannot be accurately reproduced in some cases, for example an
4546 optional operand present in the actual assembly line will be
4547 omitted in the output; likewise for the optional syntax rules,
4548 e.g. the # before the immediate. Another limitation is that the
4549 assembly symbols and relocation operations in the assembly line
4550 currently cannot be printed out in the error report. Last but not
4551 least, when there is other error(s) co-exist with this error, the
4552 'corrected' instruction may be still incorrect, e.g. given
4553 'ldnp h0,h1,[x0,#6]!'
4554 this diagnosis will provide the version:
4555 'ldnp s0,s1,[x0,#6]!'
4556 which is still not right. */
4557 size_t len = strlen (get_mnemonic_name (str));
4558 int i, qlf_idx;
4559 bfd_boolean result;
4560 char buf[2048];
4561 aarch64_inst *inst_base = &inst.base;
4562 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4563
4564 /* Init inst. */
4565 reset_aarch64_instruction (&inst);
4566 inst_base->opcode = opcode;
4567
4568 /* Reset the error report so that there is no side effect on the
4569 following operand parsing. */
4570 init_operand_error_report ();
4571
4572 /* Fill inst. */
4573 result = parse_operands (str + len, opcode)
4574 && programmer_friendly_fixup (&inst);
4575 gas_assert (result);
4576 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4577 NULL, NULL);
4578 gas_assert (!result);
4579
4580 /* Find the most matched qualifier sequence. */
4581 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4582 gas_assert (qlf_idx > -1);
4583
4584 /* Assign the qualifiers. */
4585 assign_qualifier_sequence (inst_base,
4586 opcode->qualifiers_list[qlf_idx]);
4587
4588 /* Print the hint. */
4589 output_info (_(" did you mean this?"));
4590 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4591 print_operands (buf, opcode, inst_base->operands);
4592 output_info (_(" %s"), buf);
4593
4594 /* Print out other variant(s) if there is any. */
4595 if (qlf_idx != 0 ||
4596 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4597 output_info (_(" other valid variant(s):"));
4598
4599 /* For each pattern. */
4600 qualifiers_list = opcode->qualifiers_list;
4601 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4602 {
4603 /* Most opcodes has much fewer patterns in the list.
4604 First NIL qualifier indicates the end in the list. */
4605 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4606 break;
4607
4608 if (i != qlf_idx)
4609 {
4610 /* Mnemonics name. */
4611 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4612
4613 /* Assign the qualifiers. */
4614 assign_qualifier_sequence (inst_base, *qualifiers_list);
4615
4616 /* Print instruction. */
4617 print_operands (buf, opcode, inst_base->operands);
4618
4619 output_info (_(" %s"), buf);
4620 }
4621 }
4622 }
4623 break;
4624
4625 case AARCH64_OPDE_UNTIED_OPERAND:
4626 as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
4627 detail->index + 1, str);
4628 break;
4629
4630 case AARCH64_OPDE_OUT_OF_RANGE:
4631 if (detail->data[0] != detail->data[1])
4632 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4633 detail->error ? detail->error : _("immediate value"),
4634 detail->data[0], detail->data[1], idx + 1, str);
4635 else
4636 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4637 detail->error ? detail->error : _("immediate value"),
4638 detail->data[0], idx + 1, str);
4639 break;
4640
4641 case AARCH64_OPDE_REG_LIST:
4642 if (detail->data[0] == 1)
4643 as_bad (_("invalid number of registers in the list; "
4644 "only 1 register is expected at operand %d -- `%s'"),
4645 idx + 1, str);
4646 else
4647 as_bad (_("invalid number of registers in the list; "
4648 "%d registers are expected at operand %d -- `%s'"),
4649 detail->data[0], idx + 1, str);
4650 break;
4651
4652 case AARCH64_OPDE_UNALIGNED:
4653 as_bad (_("immediate value should be a multiple of "
4654 "%d at operand %d -- `%s'"),
4655 detail->data[0], idx + 1, str);
4656 break;
4657
4658 default:
4659 gas_assert (0);
4660 break;
4661 }
4662 }
4663
4664 /* Process and output the error message about the operand mismatching.
4665
4666 When this function is called, the operand error information had
4667 been collected for an assembly line and there will be multiple
4668 errors in the case of mulitple instruction templates; output the
4669 error message that most closely describes the problem. */
4670
4671 static void
4672 output_operand_error_report (char *str)
4673 {
4674 int largest_error_pos;
4675 const char *msg = NULL;
4676 enum aarch64_operand_error_kind kind;
4677 operand_error_record *curr;
4678 operand_error_record *head = operand_error_report.head;
4679 operand_error_record *record = NULL;
4680
4681 /* No error to report. */
4682 if (head == NULL)
4683 return;
4684
4685 gas_assert (head != NULL && operand_error_report.tail != NULL);
4686
4687 /* Only one error. */
4688 if (head == operand_error_report.tail)
4689 {
4690 DEBUG_TRACE ("single opcode entry with error kind: %s",
4691 operand_mismatch_kind_names[head->detail.kind]);
4692 output_operand_error_record (head, str);
4693 return;
4694 }
4695
4696 /* Find the error kind of the highest severity. */
4697 DEBUG_TRACE ("multiple opcode entres with error kind");
4698 kind = AARCH64_OPDE_NIL;
4699 for (curr = head; curr != NULL; curr = curr->next)
4700 {
4701 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4702 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4703 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4704 kind = curr->detail.kind;
4705 }
4706 gas_assert (kind != AARCH64_OPDE_NIL);
4707
4708 /* Pick up one of errors of KIND to report. */
4709 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4710 for (curr = head; curr != NULL; curr = curr->next)
4711 {
4712 if (curr->detail.kind != kind)
4713 continue;
4714 /* If there are multiple errors, pick up the one with the highest
4715 mismatching operand index. In the case of multiple errors with
4716 the equally highest operand index, pick up the first one or the
4717 first one with non-NULL error message. */
4718 if (curr->detail.index > largest_error_pos
4719 || (curr->detail.index == largest_error_pos && msg == NULL
4720 && curr->detail.error != NULL))
4721 {
4722 largest_error_pos = curr->detail.index;
4723 record = curr;
4724 msg = record->detail.error;
4725 }
4726 }
4727
4728 gas_assert (largest_error_pos != -2 && record != NULL);
4729 DEBUG_TRACE ("Pick up error kind %s to report",
4730 operand_mismatch_kind_names[record->detail.kind]);
4731
4732 /* Output. */
4733 output_operand_error_record (record, str);
4734 }
4735 \f
4736 /* Write an AARCH64 instruction to buf - always little-endian. */
4737 static void
4738 put_aarch64_insn (char *buf, uint32_t insn)
4739 {
4740 unsigned char *where = (unsigned char *) buf;
4741 where[0] = insn;
4742 where[1] = insn >> 8;
4743 where[2] = insn >> 16;
4744 where[3] = insn >> 24;
4745 }
4746
4747 static uint32_t
4748 get_aarch64_insn (char *buf)
4749 {
4750 unsigned char *where = (unsigned char *) buf;
4751 uint32_t result;
4752 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4753 return result;
4754 }
4755
4756 static void
4757 output_inst (struct aarch64_inst *new_inst)
4758 {
4759 char *to = NULL;
4760
4761 to = frag_more (INSN_SIZE);
4762
4763 frag_now->tc_frag_data.recorded = 1;
4764
4765 put_aarch64_insn (to, inst.base.value);
4766
4767 if (inst.reloc.type != BFD_RELOC_UNUSED)
4768 {
4769 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4770 INSN_SIZE, &inst.reloc.exp,
4771 inst.reloc.pc_rel,
4772 inst.reloc.type);
4773 DEBUG_TRACE ("Prepared relocation fix up");
4774 /* Don't check the addend value against the instruction size,
4775 that's the job of our code in md_apply_fix(). */
4776 fixp->fx_no_overflow = 1;
4777 if (new_inst != NULL)
4778 fixp->tc_fix_data.inst = new_inst;
4779 if (aarch64_gas_internal_fixup_p ())
4780 {
4781 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4782 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4783 fixp->fx_addnumber = inst.reloc.flags;
4784 }
4785 }
4786
4787 dwarf2_emit_insn (INSN_SIZE);
4788 }
4789
4790 /* Link together opcodes of the same name. */
4791
4792 struct templates
4793 {
4794 aarch64_opcode *opcode;
4795 struct templates *next;
4796 };
4797
4798 typedef struct templates templates;
4799
4800 static templates *
4801 lookup_mnemonic (const char *start, int len)
4802 {
4803 templates *templ = NULL;
4804
4805 templ = hash_find_n (aarch64_ops_hsh, start, len);
4806 return templ;
4807 }
4808
4809 /* Subroutine of md_assemble, responsible for looking up the primary
4810 opcode from the mnemonic the user wrote. STR points to the
4811 beginning of the mnemonic. */
4812
4813 static templates *
4814 opcode_lookup (char **str)
4815 {
4816 char *end, *base;
4817 const aarch64_cond *cond;
4818 char condname[16];
4819 int len;
4820
4821 /* Scan up to the end of the mnemonic, which must end in white space,
4822 '.', or end of string. */
4823 for (base = end = *str; is_part_of_name(*end); end++)
4824 if (*end == '.')
4825 break;
4826
4827 if (end == base)
4828 return 0;
4829
4830 inst.cond = COND_ALWAYS;
4831
4832 /* Handle a possible condition. */
4833 if (end[0] == '.')
4834 {
4835 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4836 if (cond)
4837 {
4838 inst.cond = cond->value;
4839 *str = end + 3;
4840 }
4841 else
4842 {
4843 *str = end;
4844 return 0;
4845 }
4846 }
4847 else
4848 *str = end;
4849
4850 len = end - base;
4851
4852 if (inst.cond == COND_ALWAYS)
4853 {
4854 /* Look for unaffixed mnemonic. */
4855 return lookup_mnemonic (base, len);
4856 }
4857 else if (len <= 13)
4858 {
4859 /* append ".c" to mnemonic if conditional */
4860 memcpy (condname, base, len);
4861 memcpy (condname + len, ".c", 2);
4862 base = condname;
4863 len += 2;
4864 return lookup_mnemonic (base, len);
4865 }
4866
4867 return NULL;
4868 }
4869
4870 /* Internal helper routine converting a vector_type_el structure *VECTYPE
4871 to a corresponding operand qualifier. */
4872
4873 static inline aarch64_opnd_qualifier_t
4874 vectype_to_qualifier (const struct vector_type_el *vectype)
4875 {
4876 /* Element size in bytes indexed by vector_el_type. */
4877 const unsigned char ele_size[5]
4878 = {1, 2, 4, 8, 16};
4879 const unsigned int ele_base [5] =
4880 {
4881 AARCH64_OPND_QLF_V_8B,
4882 AARCH64_OPND_QLF_V_2H,
4883 AARCH64_OPND_QLF_V_2S,
4884 AARCH64_OPND_QLF_V_1D,
4885 AARCH64_OPND_QLF_V_1Q
4886 };
4887
4888 if (!vectype->defined || vectype->type == NT_invtype)
4889 goto vectype_conversion_fail;
4890
4891 if (vectype->type == NT_zero)
4892 return AARCH64_OPND_QLF_P_Z;
4893 if (vectype->type == NT_merge)
4894 return AARCH64_OPND_QLF_P_M;
4895
4896 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4897
4898 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
4899 /* Vector element register. */
4900 return AARCH64_OPND_QLF_S_B + vectype->type;
4901 else
4902 {
4903 /* Vector register. */
4904 int reg_size = ele_size[vectype->type] * vectype->width;
4905 unsigned offset;
4906 unsigned shift;
4907 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
4908 goto vectype_conversion_fail;
4909
4910 /* The conversion is by calculating the offset from the base operand
4911 qualifier for the vector type. The operand qualifiers are regular
4912 enough that the offset can established by shifting the vector width by
4913 a vector-type dependent amount. */
4914 shift = 0;
4915 if (vectype->type == NT_b)
4916 shift = 4;
4917 else if (vectype->type == NT_h || vectype->type == NT_s)
4918 shift = 2;
4919 else if (vectype->type >= NT_d)
4920 shift = 1;
4921 else
4922 gas_assert (0);
4923
4924 offset = ele_base [vectype->type] + (vectype->width >> shift);
4925 gas_assert (AARCH64_OPND_QLF_V_8B <= offset
4926 && offset <= AARCH64_OPND_QLF_V_1Q);
4927 return offset;
4928 }
4929
4930 vectype_conversion_fail:
4931 first_error (_("bad vector arrangement type"));
4932 return AARCH64_OPND_QLF_NIL;
4933 }
4934
4935 /* Process an optional operand that is found omitted from the assembly line.
4936 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4937 instruction's opcode entry while IDX is the index of this omitted operand.
4938 */
4939
4940 static void
4941 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4942 int idx, aarch64_opnd_info *operand)
4943 {
4944 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4945 gas_assert (optional_operand_p (opcode, idx));
4946 gas_assert (!operand->present);
4947
4948 switch (type)
4949 {
4950 case AARCH64_OPND_Rd:
4951 case AARCH64_OPND_Rn:
4952 case AARCH64_OPND_Rm:
4953 case AARCH64_OPND_Rt:
4954 case AARCH64_OPND_Rt2:
4955 case AARCH64_OPND_Rs:
4956 case AARCH64_OPND_Ra:
4957 case AARCH64_OPND_Rt_SYS:
4958 case AARCH64_OPND_Rd_SP:
4959 case AARCH64_OPND_Rn_SP:
4960 case AARCH64_OPND_Fd:
4961 case AARCH64_OPND_Fn:
4962 case AARCH64_OPND_Fm:
4963 case AARCH64_OPND_Fa:
4964 case AARCH64_OPND_Ft:
4965 case AARCH64_OPND_Ft2:
4966 case AARCH64_OPND_Sd:
4967 case AARCH64_OPND_Sn:
4968 case AARCH64_OPND_Sm:
4969 case AARCH64_OPND_Vd:
4970 case AARCH64_OPND_Vn:
4971 case AARCH64_OPND_Vm:
4972 case AARCH64_OPND_VdD1:
4973 case AARCH64_OPND_VnD1:
4974 operand->reg.regno = default_value;
4975 break;
4976
4977 case AARCH64_OPND_Ed:
4978 case AARCH64_OPND_En:
4979 case AARCH64_OPND_Em:
4980 operand->reglane.regno = default_value;
4981 break;
4982
4983 case AARCH64_OPND_IDX:
4984 case AARCH64_OPND_BIT_NUM:
4985 case AARCH64_OPND_IMMR:
4986 case AARCH64_OPND_IMMS:
4987 case AARCH64_OPND_SHLL_IMM:
4988 case AARCH64_OPND_IMM_VLSL:
4989 case AARCH64_OPND_IMM_VLSR:
4990 case AARCH64_OPND_CCMP_IMM:
4991 case AARCH64_OPND_FBITS:
4992 case AARCH64_OPND_UIMM4:
4993 case AARCH64_OPND_UIMM3_OP1:
4994 case AARCH64_OPND_UIMM3_OP2:
4995 case AARCH64_OPND_IMM:
4996 case AARCH64_OPND_WIDTH:
4997 case AARCH64_OPND_UIMM7:
4998 case AARCH64_OPND_NZCV:
4999 case AARCH64_OPND_SVE_PATTERN:
5000 case AARCH64_OPND_SVE_PRFOP:
5001 operand->imm.value = default_value;
5002 break;
5003
5004 case AARCH64_OPND_SVE_PATTERN_SCALED:
5005 operand->imm.value = default_value;
5006 operand->shifter.kind = AARCH64_MOD_MUL;
5007 operand->shifter.amount = 1;
5008 break;
5009
5010 case AARCH64_OPND_EXCEPTION:
5011 inst.reloc.type = BFD_RELOC_UNUSED;
5012 break;
5013
5014 case AARCH64_OPND_BARRIER_ISB:
5015 operand->barrier = aarch64_barrier_options + default_value;
5016
5017 default:
5018 break;
5019 }
5020 }
5021
5022 /* Process the relocation type for move wide instructions.
5023 Return TRUE on success; otherwise return FALSE. */
5024
5025 static bfd_boolean
5026 process_movw_reloc_info (void)
5027 {
5028 int is32;
5029 unsigned shift;
5030
5031 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5032
5033 if (inst.base.opcode->op == OP_MOVK)
5034 switch (inst.reloc.type)
5035 {
5036 case BFD_RELOC_AARCH64_MOVW_G0_S:
5037 case BFD_RELOC_AARCH64_MOVW_G1_S:
5038 case BFD_RELOC_AARCH64_MOVW_G2_S:
5039 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5040 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5041 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5042 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5043 set_syntax_error
5044 (_("the specified relocation type is not allowed for MOVK"));
5045 return FALSE;
5046 default:
5047 break;
5048 }
5049
5050 switch (inst.reloc.type)
5051 {
5052 case BFD_RELOC_AARCH64_MOVW_G0:
5053 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5054 case BFD_RELOC_AARCH64_MOVW_G0_S:
5055 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5056 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5057 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5058 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5059 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5060 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5061 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5062 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5063 shift = 0;
5064 break;
5065 case BFD_RELOC_AARCH64_MOVW_G1:
5066 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5067 case BFD_RELOC_AARCH64_MOVW_G1_S:
5068 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5069 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5070 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5071 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5072 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5073 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5074 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5075 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5076 shift = 16;
5077 break;
5078 case BFD_RELOC_AARCH64_MOVW_G2:
5079 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5080 case BFD_RELOC_AARCH64_MOVW_G2_S:
5081 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5082 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5083 if (is32)
5084 {
5085 set_fatal_syntax_error
5086 (_("the specified relocation type is not allowed for 32-bit "
5087 "register"));
5088 return FALSE;
5089 }
5090 shift = 32;
5091 break;
5092 case BFD_RELOC_AARCH64_MOVW_G3:
5093 if (is32)
5094 {
5095 set_fatal_syntax_error
5096 (_("the specified relocation type is not allowed for 32-bit "
5097 "register"));
5098 return FALSE;
5099 }
5100 shift = 48;
5101 break;
5102 default:
5103 /* More cases should be added when more MOVW-related relocation types
5104 are supported in GAS. */
5105 gas_assert (aarch64_gas_internal_fixup_p ());
5106 /* The shift amount should have already been set by the parser. */
5107 return TRUE;
5108 }
5109 inst.base.operands[1].shifter.amount = shift;
5110 return TRUE;
5111 }
5112
5113 /* A primitive log caculator. */
5114
5115 static inline unsigned int
5116 get_logsz (unsigned int size)
5117 {
5118 const unsigned char ls[16] =
5119 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5120 if (size > 16)
5121 {
5122 gas_assert (0);
5123 return -1;
5124 }
5125 gas_assert (ls[size - 1] != (unsigned char)-1);
5126 return ls[size - 1];
5127 }
5128
5129 /* Determine and return the real reloc type code for an instruction
5130 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5131
5132 static inline bfd_reloc_code_real_type
5133 ldst_lo12_determine_real_reloc_type (void)
5134 {
5135 unsigned logsz;
5136 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5137 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5138
5139 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
5140 {
5141 BFD_RELOC_AARCH64_LDST8_LO12,
5142 BFD_RELOC_AARCH64_LDST16_LO12,
5143 BFD_RELOC_AARCH64_LDST32_LO12,
5144 BFD_RELOC_AARCH64_LDST64_LO12,
5145 BFD_RELOC_AARCH64_LDST128_LO12
5146 },
5147 {
5148 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5149 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5150 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5151 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5152 BFD_RELOC_AARCH64_NONE
5153 },
5154 {
5155 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5156 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5157 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5158 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5159 BFD_RELOC_AARCH64_NONE
5160 }
5161 };
5162
5163 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5164 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5165 || (inst.reloc.type
5166 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
5167 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5168
5169 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5170 opd1_qlf =
5171 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5172 1, opd0_qlf, 0);
5173 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5174
5175 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5176 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5177 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5178 gas_assert (logsz <= 3);
5179 else
5180 gas_assert (logsz <= 4);
5181
5182 /* In reloc.c, these pseudo relocation types should be defined in similar
5183 order as above reloc_ldst_lo12 array. Because the array index calcuation
5184 below relies on this. */
5185 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5186 }
5187
5188 /* Check whether a register list REGINFO is valid. The registers must be
5189 numbered in increasing order (modulo 32), in increments of one or two.
5190
5191 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5192 increments of two.
5193
5194 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5195
5196 static bfd_boolean
5197 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5198 {
5199 uint32_t i, nb_regs, prev_regno, incr;
5200
5201 nb_regs = 1 + (reginfo & 0x3);
5202 reginfo >>= 2;
5203 prev_regno = reginfo & 0x1f;
5204 incr = accept_alternate ? 2 : 1;
5205
5206 for (i = 1; i < nb_regs; ++i)
5207 {
5208 uint32_t curr_regno;
5209 reginfo >>= 5;
5210 curr_regno = reginfo & 0x1f;
5211 if (curr_regno != ((prev_regno + incr) & 0x1f))
5212 return FALSE;
5213 prev_regno = curr_regno;
5214 }
5215
5216 return TRUE;
5217 }
5218
5219 /* Generic instruction operand parser. This does no encoding and no
5220 semantic validation; it merely squirrels values away in the inst
5221 structure. Returns TRUE or FALSE depending on whether the
5222 specified grammar matched. */
5223
5224 static bfd_boolean
5225 parse_operands (char *str, const aarch64_opcode *opcode)
5226 {
5227 int i;
5228 char *backtrack_pos = 0;
5229 const enum aarch64_opnd *operands = opcode->operands;
5230 aarch64_reg_type imm_reg_type;
5231
5232 clear_error ();
5233 skip_whitespace (str);
5234
5235 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5236
5237 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5238 {
5239 int64_t val;
5240 const reg_entry *reg;
5241 int comma_skipped_p = 0;
5242 aarch64_reg_type rtype;
5243 struct vector_type_el vectype;
5244 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5245 aarch64_opnd_info *info = &inst.base.operands[i];
5246 aarch64_reg_type reg_type;
5247
5248 DEBUG_TRACE ("parse operand %d", i);
5249
5250 /* Assign the operand code. */
5251 info->type = operands[i];
5252
5253 if (optional_operand_p (opcode, i))
5254 {
5255 /* Remember where we are in case we need to backtrack. */
5256 gas_assert (!backtrack_pos);
5257 backtrack_pos = str;
5258 }
5259
5260 /* Expect comma between operands; the backtrack mechanizm will take
5261 care of cases of omitted optional operand. */
5262 if (i > 0 && ! skip_past_char (&str, ','))
5263 {
5264 set_syntax_error (_("comma expected between operands"));
5265 goto failure;
5266 }
5267 else
5268 comma_skipped_p = 1;
5269
5270 switch (operands[i])
5271 {
5272 case AARCH64_OPND_Rd:
5273 case AARCH64_OPND_Rn:
5274 case AARCH64_OPND_Rm:
5275 case AARCH64_OPND_Rt:
5276 case AARCH64_OPND_Rt2:
5277 case AARCH64_OPND_Rs:
5278 case AARCH64_OPND_Ra:
5279 case AARCH64_OPND_Rt_SYS:
5280 case AARCH64_OPND_PAIRREG:
5281 po_int_reg_or_fail (REG_TYPE_R_Z);
5282 break;
5283
5284 case AARCH64_OPND_Rd_SP:
5285 case AARCH64_OPND_Rn_SP:
5286 po_int_reg_or_fail (REG_TYPE_R_SP);
5287 break;
5288
5289 case AARCH64_OPND_Rm_EXT:
5290 case AARCH64_OPND_Rm_SFT:
5291 po_misc_or_fail (parse_shifter_operand
5292 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5293 ? SHIFTED_ARITH_IMM
5294 : SHIFTED_LOGIC_IMM)));
5295 if (!info->shifter.operator_present)
5296 {
5297 /* Default to LSL if not present. Libopcodes prefers shifter
5298 kind to be explicit. */
5299 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5300 info->shifter.kind = AARCH64_MOD_LSL;
5301 /* For Rm_EXT, libopcodes will carry out further check on whether
5302 or not stack pointer is used in the instruction (Recall that
5303 "the extend operator is not optional unless at least one of
5304 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5305 }
5306 break;
5307
5308 case AARCH64_OPND_Fd:
5309 case AARCH64_OPND_Fn:
5310 case AARCH64_OPND_Fm:
5311 case AARCH64_OPND_Fa:
5312 case AARCH64_OPND_Ft:
5313 case AARCH64_OPND_Ft2:
5314 case AARCH64_OPND_Sd:
5315 case AARCH64_OPND_Sn:
5316 case AARCH64_OPND_Sm:
5317 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5318 if (val == PARSE_FAIL)
5319 {
5320 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5321 goto failure;
5322 }
5323 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5324
5325 info->reg.regno = val;
5326 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5327 break;
5328
5329 case AARCH64_OPND_SVE_Pd:
5330 case AARCH64_OPND_SVE_Pg3:
5331 case AARCH64_OPND_SVE_Pg4_5:
5332 case AARCH64_OPND_SVE_Pg4_10:
5333 case AARCH64_OPND_SVE_Pg4_16:
5334 case AARCH64_OPND_SVE_Pm:
5335 case AARCH64_OPND_SVE_Pn:
5336 case AARCH64_OPND_SVE_Pt:
5337 reg_type = REG_TYPE_PN;
5338 goto vector_reg;
5339
5340 case AARCH64_OPND_SVE_Za_5:
5341 case AARCH64_OPND_SVE_Za_16:
5342 case AARCH64_OPND_SVE_Zd:
5343 case AARCH64_OPND_SVE_Zm_5:
5344 case AARCH64_OPND_SVE_Zm_16:
5345 case AARCH64_OPND_SVE_Zn:
5346 case AARCH64_OPND_SVE_Zt:
5347 reg_type = REG_TYPE_ZN;
5348 goto vector_reg;
5349
5350 case AARCH64_OPND_Vd:
5351 case AARCH64_OPND_Vn:
5352 case AARCH64_OPND_Vm:
5353 reg_type = REG_TYPE_VN;
5354 vector_reg:
5355 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5356 if (val == PARSE_FAIL)
5357 {
5358 first_error (_(get_reg_expected_msg (reg_type)));
5359 goto failure;
5360 }
5361 if (vectype.defined & NTA_HASINDEX)
5362 goto failure;
5363
5364 info->reg.regno = val;
5365 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5366 && vectype.type == NT_invtype)
5367 /* Unqualified Pn and Zn registers are allowed in certain
5368 contexts. Rely on F_STRICT qualifier checking to catch
5369 invalid uses. */
5370 info->qualifier = AARCH64_OPND_QLF_NIL;
5371 else
5372 {
5373 info->qualifier = vectype_to_qualifier (&vectype);
5374 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5375 goto failure;
5376 }
5377 break;
5378
5379 case AARCH64_OPND_VdD1:
5380 case AARCH64_OPND_VnD1:
5381 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5382 if (val == PARSE_FAIL)
5383 {
5384 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5385 goto failure;
5386 }
5387 if (vectype.type != NT_d || vectype.index != 1)
5388 {
5389 set_fatal_syntax_error
5390 (_("the top half of a 128-bit FP/SIMD register is expected"));
5391 goto failure;
5392 }
5393 info->reg.regno = val;
5394 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5395 here; it is correct for the purpose of encoding/decoding since
5396 only the register number is explicitly encoded in the related
5397 instructions, although this appears a bit hacky. */
5398 info->qualifier = AARCH64_OPND_QLF_S_D;
5399 break;
5400
5401 case AARCH64_OPND_SVE_Zn_INDEX:
5402 reg_type = REG_TYPE_ZN;
5403 goto vector_reg_index;
5404
5405 case AARCH64_OPND_Ed:
5406 case AARCH64_OPND_En:
5407 case AARCH64_OPND_Em:
5408 reg_type = REG_TYPE_VN;
5409 vector_reg_index:
5410 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5411 if (val == PARSE_FAIL)
5412 {
5413 first_error (_(get_reg_expected_msg (reg_type)));
5414 goto failure;
5415 }
5416 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5417 goto failure;
5418
5419 info->reglane.regno = val;
5420 info->reglane.index = vectype.index;
5421 info->qualifier = vectype_to_qualifier (&vectype);
5422 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5423 goto failure;
5424 break;
5425
5426 case AARCH64_OPND_SVE_ZnxN:
5427 case AARCH64_OPND_SVE_ZtxN:
5428 reg_type = REG_TYPE_ZN;
5429 goto vector_reg_list;
5430
5431 case AARCH64_OPND_LVn:
5432 case AARCH64_OPND_LVt:
5433 case AARCH64_OPND_LVt_AL:
5434 case AARCH64_OPND_LEt:
5435 reg_type = REG_TYPE_VN;
5436 vector_reg_list:
5437 if (reg_type == REG_TYPE_ZN
5438 && get_opcode_dependent_value (opcode) == 1
5439 && *str != '{')
5440 {
5441 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5442 if (val == PARSE_FAIL)
5443 {
5444 first_error (_(get_reg_expected_msg (reg_type)));
5445 goto failure;
5446 }
5447 info->reglist.first_regno = val;
5448 info->reglist.num_regs = 1;
5449 }
5450 else
5451 {
5452 val = parse_vector_reg_list (&str, reg_type, &vectype);
5453 if (val == PARSE_FAIL)
5454 goto failure;
5455 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5456 {
5457 set_fatal_syntax_error (_("invalid register list"));
5458 goto failure;
5459 }
5460 info->reglist.first_regno = (val >> 2) & 0x1f;
5461 info->reglist.num_regs = (val & 0x3) + 1;
5462 }
5463 if (operands[i] == AARCH64_OPND_LEt)
5464 {
5465 if (!(vectype.defined & NTA_HASINDEX))
5466 goto failure;
5467 info->reglist.has_index = 1;
5468 info->reglist.index = vectype.index;
5469 }
5470 else
5471 {
5472 if (vectype.defined & NTA_HASINDEX)
5473 goto failure;
5474 if (!(vectype.defined & NTA_HASTYPE))
5475 {
5476 if (reg_type == REG_TYPE_ZN)
5477 set_fatal_syntax_error (_("missing type suffix"));
5478 goto failure;
5479 }
5480 }
5481 info->qualifier = vectype_to_qualifier (&vectype);
5482 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5483 goto failure;
5484 break;
5485
5486 case AARCH64_OPND_Cn:
5487 case AARCH64_OPND_Cm:
5488 po_reg_or_fail (REG_TYPE_CN);
5489 if (val > 15)
5490 {
5491 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5492 goto failure;
5493 }
5494 inst.base.operands[i].reg.regno = val;
5495 break;
5496
5497 case AARCH64_OPND_SHLL_IMM:
5498 case AARCH64_OPND_IMM_VLSR:
5499 po_imm_or_fail (1, 64);
5500 info->imm.value = val;
5501 break;
5502
5503 case AARCH64_OPND_CCMP_IMM:
5504 case AARCH64_OPND_SIMM5:
5505 case AARCH64_OPND_FBITS:
5506 case AARCH64_OPND_UIMM4:
5507 case AARCH64_OPND_UIMM3_OP1:
5508 case AARCH64_OPND_UIMM3_OP2:
5509 case AARCH64_OPND_IMM_VLSL:
5510 case AARCH64_OPND_IMM:
5511 case AARCH64_OPND_WIDTH:
5512 case AARCH64_OPND_SVE_INV_LIMM:
5513 case AARCH64_OPND_SVE_LIMM:
5514 case AARCH64_OPND_SVE_LIMM_MOV:
5515 case AARCH64_OPND_SVE_SHLIMM_PRED:
5516 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5517 case AARCH64_OPND_SVE_SHRIMM_PRED:
5518 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5519 case AARCH64_OPND_SVE_SIMM5:
5520 case AARCH64_OPND_SVE_SIMM5B:
5521 case AARCH64_OPND_SVE_SIMM6:
5522 case AARCH64_OPND_SVE_SIMM8:
5523 case AARCH64_OPND_SVE_UIMM3:
5524 case AARCH64_OPND_SVE_UIMM7:
5525 case AARCH64_OPND_SVE_UIMM8:
5526 case AARCH64_OPND_SVE_UIMM8_53:
5527 po_imm_nc_or_fail ();
5528 info->imm.value = val;
5529 break;
5530
5531 case AARCH64_OPND_SVE_AIMM:
5532 case AARCH64_OPND_SVE_ASIMM:
5533 po_imm_nc_or_fail ();
5534 info->imm.value = val;
5535 skip_whitespace (str);
5536 if (skip_past_comma (&str))
5537 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5538 else
5539 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5540 break;
5541
5542 case AARCH64_OPND_SVE_PATTERN:
5543 po_enum_or_fail (aarch64_sve_pattern_array);
5544 info->imm.value = val;
5545 break;
5546
5547 case AARCH64_OPND_SVE_PATTERN_SCALED:
5548 po_enum_or_fail (aarch64_sve_pattern_array);
5549 info->imm.value = val;
5550 if (skip_past_comma (&str)
5551 && !parse_shift (&str, info, SHIFTED_MUL))
5552 goto failure;
5553 if (!info->shifter.operator_present)
5554 {
5555 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5556 info->shifter.kind = AARCH64_MOD_MUL;
5557 info->shifter.amount = 1;
5558 }
5559 break;
5560
5561 case AARCH64_OPND_SVE_PRFOP:
5562 po_enum_or_fail (aarch64_sve_prfop_array);
5563 info->imm.value = val;
5564 break;
5565
5566 case AARCH64_OPND_UIMM7:
5567 po_imm_or_fail (0, 127);
5568 info->imm.value = val;
5569 break;
5570
5571 case AARCH64_OPND_IDX:
5572 case AARCH64_OPND_BIT_NUM:
5573 case AARCH64_OPND_IMMR:
5574 case AARCH64_OPND_IMMS:
5575 po_imm_or_fail (0, 63);
5576 info->imm.value = val;
5577 break;
5578
5579 case AARCH64_OPND_IMM0:
5580 po_imm_nc_or_fail ();
5581 if (val != 0)
5582 {
5583 set_fatal_syntax_error (_("immediate zero expected"));
5584 goto failure;
5585 }
5586 info->imm.value = 0;
5587 break;
5588
5589 case AARCH64_OPND_FPIMM0:
5590 {
5591 int qfloat;
5592 bfd_boolean res1 = FALSE, res2 = FALSE;
5593 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5594 it is probably not worth the effort to support it. */
5595 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5596 imm_reg_type))
5597 && (error_p ()
5598 || !(res2 = parse_constant_immediate (&str, &val,
5599 imm_reg_type))))
5600 goto failure;
5601 if ((res1 && qfloat == 0) || (res2 && val == 0))
5602 {
5603 info->imm.value = 0;
5604 info->imm.is_fp = 1;
5605 break;
5606 }
5607 set_fatal_syntax_error (_("immediate zero expected"));
5608 goto failure;
5609 }
5610
5611 case AARCH64_OPND_IMM_MOV:
5612 {
5613 char *saved = str;
5614 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5615 reg_name_p (str, REG_TYPE_VN))
5616 goto failure;
5617 str = saved;
5618 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5619 GE_OPT_PREFIX, 1));
5620 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5621 later. fix_mov_imm_insn will try to determine a machine
5622 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5623 message if the immediate cannot be moved by a single
5624 instruction. */
5625 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5626 inst.base.operands[i].skip = 1;
5627 }
5628 break;
5629
5630 case AARCH64_OPND_SIMD_IMM:
5631 case AARCH64_OPND_SIMD_IMM_SFT:
5632 if (! parse_big_immediate (&str, &val, imm_reg_type))
5633 goto failure;
5634 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5635 /* addr_off_p */ 0,
5636 /* need_libopcodes_p */ 1,
5637 /* skip_p */ 1);
5638 /* Parse shift.
5639 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5640 shift, we don't check it here; we leave the checking to
5641 the libopcodes (operand_general_constraint_met_p). By
5642 doing this, we achieve better diagnostics. */
5643 if (skip_past_comma (&str)
5644 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5645 goto failure;
5646 if (!info->shifter.operator_present
5647 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5648 {
5649 /* Default to LSL if not present. Libopcodes prefers shifter
5650 kind to be explicit. */
5651 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5652 info->shifter.kind = AARCH64_MOD_LSL;
5653 }
5654 break;
5655
5656 case AARCH64_OPND_FPIMM:
5657 case AARCH64_OPND_SIMD_FPIMM:
5658 {
5659 int qfloat;
5660 bfd_boolean dp_p
5661 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5662 == 8);
5663 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5664 || !aarch64_imm_float_p (qfloat))
5665 {
5666 if (!error_p ())
5667 set_fatal_syntax_error (_("invalid floating-point"
5668 " constant"));
5669 goto failure;
5670 }
5671 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5672 inst.base.operands[i].imm.is_fp = 1;
5673 }
5674 break;
5675
5676 case AARCH64_OPND_LIMM:
5677 po_misc_or_fail (parse_shifter_operand (&str, info,
5678 SHIFTED_LOGIC_IMM));
5679 if (info->shifter.operator_present)
5680 {
5681 set_fatal_syntax_error
5682 (_("shift not allowed for bitmask immediate"));
5683 goto failure;
5684 }
5685 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5686 /* addr_off_p */ 0,
5687 /* need_libopcodes_p */ 1,
5688 /* skip_p */ 1);
5689 break;
5690
5691 case AARCH64_OPND_AIMM:
5692 if (opcode->op == OP_ADD)
5693 /* ADD may have relocation types. */
5694 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5695 SHIFTED_ARITH_IMM));
5696 else
5697 po_misc_or_fail (parse_shifter_operand (&str, info,
5698 SHIFTED_ARITH_IMM));
5699 switch (inst.reloc.type)
5700 {
5701 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5702 info->shifter.amount = 12;
5703 break;
5704 case BFD_RELOC_UNUSED:
5705 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5706 if (info->shifter.kind != AARCH64_MOD_NONE)
5707 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5708 inst.reloc.pc_rel = 0;
5709 break;
5710 default:
5711 break;
5712 }
5713 info->imm.value = 0;
5714 if (!info->shifter.operator_present)
5715 {
5716 /* Default to LSL if not present. Libopcodes prefers shifter
5717 kind to be explicit. */
5718 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5719 info->shifter.kind = AARCH64_MOD_LSL;
5720 }
5721 break;
5722
5723 case AARCH64_OPND_HALF:
5724 {
5725 /* #<imm16> or relocation. */
5726 int internal_fixup_p;
5727 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5728 if (internal_fixup_p)
5729 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5730 skip_whitespace (str);
5731 if (skip_past_comma (&str))
5732 {
5733 /* {, LSL #<shift>} */
5734 if (! aarch64_gas_internal_fixup_p ())
5735 {
5736 set_fatal_syntax_error (_("can't mix relocation modifier "
5737 "with explicit shift"));
5738 goto failure;
5739 }
5740 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5741 }
5742 else
5743 inst.base.operands[i].shifter.amount = 0;
5744 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5745 inst.base.operands[i].imm.value = 0;
5746 if (! process_movw_reloc_info ())
5747 goto failure;
5748 }
5749 break;
5750
5751 case AARCH64_OPND_EXCEPTION:
5752 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5753 imm_reg_type));
5754 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5755 /* addr_off_p */ 0,
5756 /* need_libopcodes_p */ 0,
5757 /* skip_p */ 1);
5758 break;
5759
5760 case AARCH64_OPND_NZCV:
5761 {
5762 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5763 if (nzcv != NULL)
5764 {
5765 str += 4;
5766 info->imm.value = nzcv->value;
5767 break;
5768 }
5769 po_imm_or_fail (0, 15);
5770 info->imm.value = val;
5771 }
5772 break;
5773
5774 case AARCH64_OPND_COND:
5775 case AARCH64_OPND_COND1:
5776 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5777 str += 2;
5778 if (info->cond == NULL)
5779 {
5780 set_syntax_error (_("invalid condition"));
5781 goto failure;
5782 }
5783 else if (operands[i] == AARCH64_OPND_COND1
5784 && (info->cond->value & 0xe) == 0xe)
5785 {
5786 /* Not allow AL or NV. */
5787 set_default_error ();
5788 goto failure;
5789 }
5790 break;
5791
5792 case AARCH64_OPND_ADDR_ADRP:
5793 po_misc_or_fail (parse_adrp (&str));
5794 /* Clear the value as operand needs to be relocated. */
5795 info->imm.value = 0;
5796 break;
5797
5798 case AARCH64_OPND_ADDR_PCREL14:
5799 case AARCH64_OPND_ADDR_PCREL19:
5800 case AARCH64_OPND_ADDR_PCREL21:
5801 case AARCH64_OPND_ADDR_PCREL26:
5802 po_misc_or_fail (parse_address (&str, info));
5803 if (!info->addr.pcrel)
5804 {
5805 set_syntax_error (_("invalid pc-relative address"));
5806 goto failure;
5807 }
5808 if (inst.gen_lit_pool
5809 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5810 {
5811 /* Only permit "=value" in the literal load instructions.
5812 The literal will be generated by programmer_friendly_fixup. */
5813 set_syntax_error (_("invalid use of \"=immediate\""));
5814 goto failure;
5815 }
5816 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5817 {
5818 set_syntax_error (_("unrecognized relocation suffix"));
5819 goto failure;
5820 }
5821 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5822 {
5823 info->imm.value = inst.reloc.exp.X_add_number;
5824 inst.reloc.type = BFD_RELOC_UNUSED;
5825 }
5826 else
5827 {
5828 info->imm.value = 0;
5829 if (inst.reloc.type == BFD_RELOC_UNUSED)
5830 switch (opcode->iclass)
5831 {
5832 case compbranch:
5833 case condbranch:
5834 /* e.g. CBZ or B.COND */
5835 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5836 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5837 break;
5838 case testbranch:
5839 /* e.g. TBZ */
5840 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5841 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5842 break;
5843 case branch_imm:
5844 /* e.g. B or BL */
5845 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5846 inst.reloc.type =
5847 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5848 : BFD_RELOC_AARCH64_JUMP26;
5849 break;
5850 case loadlit:
5851 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5852 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5853 break;
5854 case pcreladdr:
5855 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5856 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5857 break;
5858 default:
5859 gas_assert (0);
5860 abort ();
5861 }
5862 inst.reloc.pc_rel = 1;
5863 }
5864 break;
5865
5866 case AARCH64_OPND_ADDR_SIMPLE:
5867 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5868 {
5869 /* [<Xn|SP>{, #<simm>}] */
5870 char *start = str;
5871 /* First use the normal address-parsing routines, to get
5872 the usual syntax errors. */
5873 po_misc_or_fail (parse_address (&str, info));
5874 if (info->addr.pcrel || info->addr.offset.is_reg
5875 || !info->addr.preind || info->addr.postind
5876 || info->addr.writeback)
5877 {
5878 set_syntax_error (_("invalid addressing mode"));
5879 goto failure;
5880 }
5881
5882 /* Then retry, matching the specific syntax of these addresses. */
5883 str = start;
5884 po_char_or_fail ('[');
5885 po_reg_or_fail (REG_TYPE_R64_SP);
5886 /* Accept optional ", #0". */
5887 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5888 && skip_past_char (&str, ','))
5889 {
5890 skip_past_char (&str, '#');
5891 if (! skip_past_char (&str, '0'))
5892 {
5893 set_fatal_syntax_error
5894 (_("the optional immediate offset can only be 0"));
5895 goto failure;
5896 }
5897 }
5898 po_char_or_fail (']');
5899 break;
5900 }
5901
5902 case AARCH64_OPND_ADDR_REGOFF:
5903 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5904 po_misc_or_fail (parse_address (&str, info));
5905 regoff_addr:
5906 if (info->addr.pcrel || !info->addr.offset.is_reg
5907 || !info->addr.preind || info->addr.postind
5908 || info->addr.writeback)
5909 {
5910 set_syntax_error (_("invalid addressing mode"));
5911 goto failure;
5912 }
5913 if (!info->shifter.operator_present)
5914 {
5915 /* Default to LSL if not present. Libopcodes prefers shifter
5916 kind to be explicit. */
5917 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5918 info->shifter.kind = AARCH64_MOD_LSL;
5919 }
5920 /* Qualifier to be deduced by libopcodes. */
5921 break;
5922
5923 case AARCH64_OPND_ADDR_SIMM7:
5924 po_misc_or_fail (parse_address (&str, info));
5925 if (info->addr.pcrel || info->addr.offset.is_reg
5926 || (!info->addr.preind && !info->addr.postind))
5927 {
5928 set_syntax_error (_("invalid addressing mode"));
5929 goto failure;
5930 }
5931 if (inst.reloc.type != BFD_RELOC_UNUSED)
5932 {
5933 set_syntax_error (_("relocation not allowed"));
5934 goto failure;
5935 }
5936 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5937 /* addr_off_p */ 1,
5938 /* need_libopcodes_p */ 1,
5939 /* skip_p */ 0);
5940 break;
5941
5942 case AARCH64_OPND_ADDR_SIMM9:
5943 case AARCH64_OPND_ADDR_SIMM9_2:
5944 po_misc_or_fail (parse_address (&str, info));
5945 if (info->addr.pcrel || info->addr.offset.is_reg
5946 || (!info->addr.preind && !info->addr.postind)
5947 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5948 && info->addr.writeback))
5949 {
5950 set_syntax_error (_("invalid addressing mode"));
5951 goto failure;
5952 }
5953 if (inst.reloc.type != BFD_RELOC_UNUSED)
5954 {
5955 set_syntax_error (_("relocation not allowed"));
5956 goto failure;
5957 }
5958 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5959 /* addr_off_p */ 1,
5960 /* need_libopcodes_p */ 1,
5961 /* skip_p */ 0);
5962 break;
5963
5964 case AARCH64_OPND_ADDR_UIMM12:
5965 po_misc_or_fail (parse_address (&str, info));
5966 if (info->addr.pcrel || info->addr.offset.is_reg
5967 || !info->addr.preind || info->addr.writeback)
5968 {
5969 set_syntax_error (_("invalid addressing mode"));
5970 goto failure;
5971 }
5972 if (inst.reloc.type == BFD_RELOC_UNUSED)
5973 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5974 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5975 || (inst.reloc.type
5976 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5977 || (inst.reloc.type
5978 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5979 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5980 /* Leave qualifier to be determined by libopcodes. */
5981 break;
5982
5983 case AARCH64_OPND_SIMD_ADDR_POST:
5984 /* [<Xn|SP>], <Xm|#<amount>> */
5985 po_misc_or_fail (parse_address (&str, info));
5986 if (!info->addr.postind || !info->addr.writeback)
5987 {
5988 set_syntax_error (_("invalid addressing mode"));
5989 goto failure;
5990 }
5991 if (!info->addr.offset.is_reg)
5992 {
5993 if (inst.reloc.exp.X_op == O_constant)
5994 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5995 else
5996 {
5997 set_fatal_syntax_error
5998 (_("writeback value should be an immediate constant"));
5999 goto failure;
6000 }
6001 }
6002 /* No qualifier. */
6003 break;
6004
6005 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6006 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6007 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6008 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6009 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6010 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6011 case AARCH64_OPND_SVE_ADDR_RI_U6:
6012 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6013 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6014 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6015 /* [X<n>{, #imm, MUL VL}]
6016 [X<n>{, #imm}]
6017 but recognizing SVE registers. */
6018 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6019 &offset_qualifier));
6020 if (base_qualifier != AARCH64_OPND_QLF_X)
6021 {
6022 set_syntax_error (_("invalid addressing mode"));
6023 goto failure;
6024 }
6025 sve_regimm:
6026 if (info->addr.pcrel || info->addr.offset.is_reg
6027 || !info->addr.preind || info->addr.writeback)
6028 {
6029 set_syntax_error (_("invalid addressing mode"));
6030 goto failure;
6031 }
6032 if (inst.reloc.type != BFD_RELOC_UNUSED
6033 || inst.reloc.exp.X_op != O_constant)
6034 {
6035 /* Make sure this has priority over
6036 "invalid addressing mode". */
6037 set_fatal_syntax_error (_("constant offset required"));
6038 goto failure;
6039 }
6040 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6041 break;
6042
6043 case AARCH64_OPND_SVE_ADDR_RR:
6044 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6045 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6046 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6047 case AARCH64_OPND_SVE_ADDR_RX:
6048 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6049 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6050 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6051 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6052 but recognizing SVE registers. */
6053 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6054 &offset_qualifier));
6055 if (base_qualifier != AARCH64_OPND_QLF_X
6056 || offset_qualifier != AARCH64_OPND_QLF_X)
6057 {
6058 set_syntax_error (_("invalid addressing mode"));
6059 goto failure;
6060 }
6061 goto regoff_addr;
6062
6063 case AARCH64_OPND_SVE_ADDR_RZ:
6064 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6065 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6066 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6067 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6068 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6069 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6070 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6071 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6072 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6073 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6074 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6075 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6076 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6077 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6078 &offset_qualifier));
6079 if (base_qualifier != AARCH64_OPND_QLF_X
6080 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6081 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6082 {
6083 set_syntax_error (_("invalid addressing mode"));
6084 goto failure;
6085 }
6086 info->qualifier = offset_qualifier;
6087 goto regoff_addr;
6088
6089 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6090 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6091 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6092 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6093 /* [Z<n>.<T>{, #imm}] */
6094 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6095 &offset_qualifier));
6096 if (base_qualifier != AARCH64_OPND_QLF_S_S
6097 && base_qualifier != AARCH64_OPND_QLF_S_D)
6098 {
6099 set_syntax_error (_("invalid addressing mode"));
6100 goto failure;
6101 }
6102 info->qualifier = base_qualifier;
6103 goto sve_regimm;
6104
6105 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6106 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6107 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6108 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6109 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6110
6111 We don't reject:
6112
6113 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6114
6115 here since we get better error messages by leaving it to
6116 the qualifier checking routines. */
6117 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6118 &offset_qualifier));
6119 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6120 && base_qualifier != AARCH64_OPND_QLF_S_D)
6121 || offset_qualifier != base_qualifier)
6122 {
6123 set_syntax_error (_("invalid addressing mode"));
6124 goto failure;
6125 }
6126 info->qualifier = base_qualifier;
6127 goto regoff_addr;
6128
6129 case AARCH64_OPND_SYSREG:
6130 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6131 == PARSE_FAIL)
6132 {
6133 set_syntax_error (_("unknown or missing system register name"));
6134 goto failure;
6135 }
6136 inst.base.operands[i].sysreg = val;
6137 break;
6138
6139 case AARCH64_OPND_PSTATEFIELD:
6140 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6141 == PARSE_FAIL)
6142 {
6143 set_syntax_error (_("unknown or missing PSTATE field name"));
6144 goto failure;
6145 }
6146 inst.base.operands[i].pstatefield = val;
6147 break;
6148
6149 case AARCH64_OPND_SYSREG_IC:
6150 inst.base.operands[i].sysins_op =
6151 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6152 goto sys_reg_ins;
6153 case AARCH64_OPND_SYSREG_DC:
6154 inst.base.operands[i].sysins_op =
6155 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6156 goto sys_reg_ins;
6157 case AARCH64_OPND_SYSREG_AT:
6158 inst.base.operands[i].sysins_op =
6159 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6160 goto sys_reg_ins;
6161 case AARCH64_OPND_SYSREG_TLBI:
6162 inst.base.operands[i].sysins_op =
6163 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6164 sys_reg_ins:
6165 if (inst.base.operands[i].sysins_op == NULL)
6166 {
6167 set_fatal_syntax_error ( _("unknown or missing operation name"));
6168 goto failure;
6169 }
6170 break;
6171
6172 case AARCH64_OPND_BARRIER:
6173 case AARCH64_OPND_BARRIER_ISB:
6174 val = parse_barrier (&str);
6175 if (val != PARSE_FAIL
6176 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6177 {
6178 /* ISB only accepts options name 'sy'. */
6179 set_syntax_error
6180 (_("the specified option is not accepted in ISB"));
6181 /* Turn off backtrack as this optional operand is present. */
6182 backtrack_pos = 0;
6183 goto failure;
6184 }
6185 /* This is an extension to accept a 0..15 immediate. */
6186 if (val == PARSE_FAIL)
6187 po_imm_or_fail (0, 15);
6188 info->barrier = aarch64_barrier_options + val;
6189 break;
6190
6191 case AARCH64_OPND_PRFOP:
6192 val = parse_pldop (&str);
6193 /* This is an extension to accept a 0..31 immediate. */
6194 if (val == PARSE_FAIL)
6195 po_imm_or_fail (0, 31);
6196 inst.base.operands[i].prfop = aarch64_prfops + val;
6197 break;
6198
6199 case AARCH64_OPND_BARRIER_PSB:
6200 val = parse_barrier_psb (&str, &(info->hint_option));
6201 if (val == PARSE_FAIL)
6202 goto failure;
6203 break;
6204
6205 default:
6206 as_fatal (_("unhandled operand code %d"), operands[i]);
6207 }
6208
6209 /* If we get here, this operand was successfully parsed. */
6210 inst.base.operands[i].present = 1;
6211 continue;
6212
6213 failure:
6214 /* The parse routine should already have set the error, but in case
6215 not, set a default one here. */
6216 if (! error_p ())
6217 set_default_error ();
6218
6219 if (! backtrack_pos)
6220 goto parse_operands_return;
6221
6222 {
6223 /* We reach here because this operand is marked as optional, and
6224 either no operand was supplied or the operand was supplied but it
6225 was syntactically incorrect. In the latter case we report an
6226 error. In the former case we perform a few more checks before
6227 dropping through to the code to insert the default operand. */
6228
6229 char *tmp = backtrack_pos;
6230 char endchar = END_OF_INSN;
6231
6232 if (i != (aarch64_num_of_operands (opcode) - 1))
6233 endchar = ',';
6234 skip_past_char (&tmp, ',');
6235
6236 if (*tmp != endchar)
6237 /* The user has supplied an operand in the wrong format. */
6238 goto parse_operands_return;
6239
6240 /* Make sure there is not a comma before the optional operand.
6241 For example the fifth operand of 'sys' is optional:
6242
6243 sys #0,c0,c0,#0, <--- wrong
6244 sys #0,c0,c0,#0 <--- correct. */
6245 if (comma_skipped_p && i && endchar == END_OF_INSN)
6246 {
6247 set_fatal_syntax_error
6248 (_("unexpected comma before the omitted optional operand"));
6249 goto parse_operands_return;
6250 }
6251 }
6252
6253 /* Reaching here means we are dealing with an optional operand that is
6254 omitted from the assembly line. */
6255 gas_assert (optional_operand_p (opcode, i));
6256 info->present = 0;
6257 process_omitted_operand (operands[i], opcode, i, info);
6258
6259 /* Try again, skipping the optional operand at backtrack_pos. */
6260 str = backtrack_pos;
6261 backtrack_pos = 0;
6262
6263 /* Clear any error record after the omitted optional operand has been
6264 successfully handled. */
6265 clear_error ();
6266 }
6267
6268 /* Check if we have parsed all the operands. */
6269 if (*str != '\0' && ! error_p ())
6270 {
6271 /* Set I to the index of the last present operand; this is
6272 for the purpose of diagnostics. */
6273 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6274 ;
6275 set_fatal_syntax_error
6276 (_("unexpected characters following instruction"));
6277 }
6278
6279 parse_operands_return:
6280
6281 if (error_p ())
6282 {
6283 DEBUG_TRACE ("parsing FAIL: %s - %s",
6284 operand_mismatch_kind_names[get_error_kind ()],
6285 get_error_message ());
6286 /* Record the operand error properly; this is useful when there
6287 are multiple instruction templates for a mnemonic name, so that
6288 later on, we can select the error that most closely describes
6289 the problem. */
6290 record_operand_error (opcode, i, get_error_kind (),
6291 get_error_message ());
6292 return FALSE;
6293 }
6294 else
6295 {
6296 DEBUG_TRACE ("parsing SUCCESS");
6297 return TRUE;
6298 }
6299 }
6300
6301 /* It does some fix-up to provide some programmer friendly feature while
6302 keeping the libopcodes happy, i.e. libopcodes only accepts
6303 the preferred architectural syntax.
6304 Return FALSE if there is any failure; otherwise return TRUE. */
6305
6306 static bfd_boolean
6307 programmer_friendly_fixup (aarch64_instruction *instr)
6308 {
6309 aarch64_inst *base = &instr->base;
6310 const aarch64_opcode *opcode = base->opcode;
6311 enum aarch64_op op = opcode->op;
6312 aarch64_opnd_info *operands = base->operands;
6313
6314 DEBUG_TRACE ("enter");
6315
6316 switch (opcode->iclass)
6317 {
6318 case testbranch:
6319 /* TBNZ Xn|Wn, #uimm6, label
6320 Test and Branch Not Zero: conditionally jumps to label if bit number
6321 uimm6 in register Xn is not zero. The bit number implies the width of
6322 the register, which may be written and should be disassembled as Wn if
6323 uimm is less than 32. */
6324 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6325 {
6326 if (operands[1].imm.value >= 32)
6327 {
6328 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6329 0, 31);
6330 return FALSE;
6331 }
6332 operands[0].qualifier = AARCH64_OPND_QLF_X;
6333 }
6334 break;
6335 case loadlit:
6336 /* LDR Wt, label | =value
6337 As a convenience assemblers will typically permit the notation
6338 "=value" in conjunction with the pc-relative literal load instructions
6339 to automatically place an immediate value or symbolic address in a
6340 nearby literal pool and generate a hidden label which references it.
6341 ISREG has been set to 0 in the case of =value. */
6342 if (instr->gen_lit_pool
6343 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6344 {
6345 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6346 if (op == OP_LDRSW_LIT)
6347 size = 4;
6348 if (instr->reloc.exp.X_op != O_constant
6349 && instr->reloc.exp.X_op != O_big
6350 && instr->reloc.exp.X_op != O_symbol)
6351 {
6352 record_operand_error (opcode, 1,
6353 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6354 _("constant expression expected"));
6355 return FALSE;
6356 }
6357 if (! add_to_lit_pool (&instr->reloc.exp, size))
6358 {
6359 record_operand_error (opcode, 1,
6360 AARCH64_OPDE_OTHER_ERROR,
6361 _("literal pool insertion failed"));
6362 return FALSE;
6363 }
6364 }
6365 break;
6366 case log_shift:
6367 case bitfield:
6368 /* UXT[BHW] Wd, Wn
6369 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
6370 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
6371 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6372 A programmer-friendly assembler should accept a destination Xd in
6373 place of Wd, however that is not the preferred form for disassembly.
6374 */
6375 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6376 && operands[1].qualifier == AARCH64_OPND_QLF_W
6377 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6378 operands[0].qualifier = AARCH64_OPND_QLF_W;
6379 break;
6380
6381 case addsub_ext:
6382 {
6383 /* In the 64-bit form, the final register operand is written as Wm
6384 for all but the (possibly omitted) UXTX/LSL and SXTX
6385 operators.
6386 As a programmer-friendly assembler, we accept e.g.
6387 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6388 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6389 int idx = aarch64_operand_index (opcode->operands,
6390 AARCH64_OPND_Rm_EXT);
6391 gas_assert (idx == 1 || idx == 2);
6392 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6393 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6394 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6395 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6396 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6397 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6398 }
6399 break;
6400
6401 default:
6402 break;
6403 }
6404
6405 DEBUG_TRACE ("exit with SUCCESS");
6406 return TRUE;
6407 }
6408
6409 /* Check for loads and stores that will cause unpredictable behavior. */
6410
6411 static void
6412 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6413 {
6414 aarch64_inst *base = &instr->base;
6415 const aarch64_opcode *opcode = base->opcode;
6416 const aarch64_opnd_info *opnds = base->operands;
6417 switch (opcode->iclass)
6418 {
6419 case ldst_pos:
6420 case ldst_imm9:
6421 case ldst_unscaled:
6422 case ldst_unpriv:
6423 /* Loading/storing the base register is unpredictable if writeback. */
6424 if ((aarch64_get_operand_class (opnds[0].type)
6425 == AARCH64_OPND_CLASS_INT_REG)
6426 && opnds[0].reg.regno == opnds[1].addr.base_regno
6427 && opnds[1].addr.base_regno != REG_SP
6428 && opnds[1].addr.writeback)
6429 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6430 break;
6431 case ldstpair_off:
6432 case ldstnapair_offs:
6433 case ldstpair_indexed:
6434 /* Loading/storing the base register is unpredictable if writeback. */
6435 if ((aarch64_get_operand_class (opnds[0].type)
6436 == AARCH64_OPND_CLASS_INT_REG)
6437 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6438 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6439 && opnds[2].addr.base_regno != REG_SP
6440 && opnds[2].addr.writeback)
6441 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6442 /* Load operations must load different registers. */
6443 if ((opcode->opcode & (1 << 22))
6444 && opnds[0].reg.regno == opnds[1].reg.regno)
6445 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6446 break;
6447 default:
6448 break;
6449 }
6450 }
6451
6452 /* A wrapper function to interface with libopcodes on encoding and
6453 record the error message if there is any.
6454
6455 Return TRUE on success; otherwise return FALSE. */
6456
6457 static bfd_boolean
6458 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6459 aarch64_insn *code)
6460 {
6461 aarch64_operand_error error_info;
6462 error_info.kind = AARCH64_OPDE_NIL;
6463 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6464 return TRUE;
6465 else
6466 {
6467 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6468 record_operand_error_info (opcode, &error_info);
6469 return FALSE;
6470 }
6471 }
6472
6473 #ifdef DEBUG_AARCH64
6474 static inline void
6475 dump_opcode_operands (const aarch64_opcode *opcode)
6476 {
6477 int i = 0;
6478 while (opcode->operands[i] != AARCH64_OPND_NIL)
6479 {
6480 aarch64_verbose ("\t\t opnd%d: %s", i,
6481 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6482 ? aarch64_get_operand_name (opcode->operands[i])
6483 : aarch64_get_operand_desc (opcode->operands[i]));
6484 ++i;
6485 }
6486 }
6487 #endif /* DEBUG_AARCH64 */
6488
6489 /* This is the guts of the machine-dependent assembler. STR points to a
6490 machine dependent instruction. This function is supposed to emit
6491 the frags/bytes it assembles to. */
6492
6493 void
6494 md_assemble (char *str)
6495 {
6496 char *p = str;
6497 templates *template;
6498 aarch64_opcode *opcode;
6499 aarch64_inst *inst_base;
6500 unsigned saved_cond;
6501
6502 /* Align the previous label if needed. */
6503 if (last_label_seen != NULL)
6504 {
6505 symbol_set_frag (last_label_seen, frag_now);
6506 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6507 S_SET_SEGMENT (last_label_seen, now_seg);
6508 }
6509
6510 inst.reloc.type = BFD_RELOC_UNUSED;
6511
6512 DEBUG_TRACE ("\n\n");
6513 DEBUG_TRACE ("==============================");
6514 DEBUG_TRACE ("Enter md_assemble with %s", str);
6515
6516 template = opcode_lookup (&p);
6517 if (!template)
6518 {
6519 /* It wasn't an instruction, but it might be a register alias of
6520 the form alias .req reg directive. */
6521 if (!create_register_alias (str, p))
6522 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6523 str);
6524 return;
6525 }
6526
6527 skip_whitespace (p);
6528 if (*p == ',')
6529 {
6530 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6531 get_mnemonic_name (str), str);
6532 return;
6533 }
6534
6535 init_operand_error_report ();
6536
6537 /* Sections are assumed to start aligned. In executable section, there is no
6538 MAP_DATA symbol pending. So we only align the address during
6539 MAP_DATA --> MAP_INSN transition.
6540 For other sections, this is not guaranteed. */
6541 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6542 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6543 frag_align_code (2, 0);
6544
6545 saved_cond = inst.cond;
6546 reset_aarch64_instruction (&inst);
6547 inst.cond = saved_cond;
6548
6549 /* Iterate through all opcode entries with the same mnemonic name. */
6550 do
6551 {
6552 opcode = template->opcode;
6553
6554 DEBUG_TRACE ("opcode %s found", opcode->name);
6555 #ifdef DEBUG_AARCH64
6556 if (debug_dump)
6557 dump_opcode_operands (opcode);
6558 #endif /* DEBUG_AARCH64 */
6559
6560 mapping_state (MAP_INSN);
6561
6562 inst_base = &inst.base;
6563 inst_base->opcode = opcode;
6564
6565 /* Truly conditionally executed instructions, e.g. b.cond. */
6566 if (opcode->flags & F_COND)
6567 {
6568 gas_assert (inst.cond != COND_ALWAYS);
6569 inst_base->cond = get_cond_from_value (inst.cond);
6570 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6571 }
6572 else if (inst.cond != COND_ALWAYS)
6573 {
6574 /* It shouldn't arrive here, where the assembly looks like a
6575 conditional instruction but the found opcode is unconditional. */
6576 gas_assert (0);
6577 continue;
6578 }
6579
6580 if (parse_operands (p, opcode)
6581 && programmer_friendly_fixup (&inst)
6582 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6583 {
6584 /* Check that this instruction is supported for this CPU. */
6585 if (!opcode->avariant
6586 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
6587 {
6588 as_bad (_("selected processor does not support `%s'"), str);
6589 return;
6590 }
6591
6592 warn_unpredictable_ldst (&inst, str);
6593
6594 if (inst.reloc.type == BFD_RELOC_UNUSED
6595 || !inst.reloc.need_libopcodes_p)
6596 output_inst (NULL);
6597 else
6598 {
6599 /* If there is relocation generated for the instruction,
6600 store the instruction information for the future fix-up. */
6601 struct aarch64_inst *copy;
6602 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6603 copy = XNEW (struct aarch64_inst);
6604 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6605 output_inst (copy);
6606 }
6607 return;
6608 }
6609
6610 template = template->next;
6611 if (template != NULL)
6612 {
6613 reset_aarch64_instruction (&inst);
6614 inst.cond = saved_cond;
6615 }
6616 }
6617 while (template != NULL);
6618
6619 /* Issue the error messages if any. */
6620 output_operand_error_report (str);
6621 }
6622
6623 /* Various frobbings of labels and their addresses. */
6624
6625 void
6626 aarch64_start_line_hook (void)
6627 {
6628 last_label_seen = NULL;
6629 }
6630
6631 void
6632 aarch64_frob_label (symbolS * sym)
6633 {
6634 last_label_seen = sym;
6635
6636 dwarf2_emit_label (sym);
6637 }
6638
6639 int
6640 aarch64_data_in_code (void)
6641 {
6642 if (!strncmp (input_line_pointer + 1, "data:", 5))
6643 {
6644 *input_line_pointer = '/';
6645 input_line_pointer += 5;
6646 *input_line_pointer = 0;
6647 return 1;
6648 }
6649
6650 return 0;
6651 }
6652
6653 char *
6654 aarch64_canonicalize_symbol_name (char *name)
6655 {
6656 int len;
6657
6658 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6659 *(name + len - 5) = 0;
6660
6661 return name;
6662 }
6663 \f
6664 /* Table of all register names defined by default. The user can
6665 define additional names with .req. Note that all register names
6666 should appear in both upper and lowercase variants. Some registers
6667 also have mixed-case names. */
6668
6669 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6670 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6671 #define REGSET16(p,t) \
6672 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6673 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6674 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6675 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
6676 #define REGSET31(p,t) \
6677 REGSET16(p, t), \
6678 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6679 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6680 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6681 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6682 #define REGSET(p,t) \
6683 REGSET31(p,t), REGNUM(p,31,t)
6684
6685 /* These go into aarch64_reg_hsh hash-table. */
6686 static const reg_entry reg_names[] = {
6687 /* Integer registers. */
6688 REGSET31 (x, R_64), REGSET31 (X, R_64),
6689 REGSET31 (w, R_32), REGSET31 (W, R_32),
6690
6691 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6692 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6693
6694 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6695 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6696
6697 /* Coprocessor register numbers. */
6698 REGSET (c, CN), REGSET (C, CN),
6699
6700 /* Floating-point single precision registers. */
6701 REGSET (s, FP_S), REGSET (S, FP_S),
6702
6703 /* Floating-point double precision registers. */
6704 REGSET (d, FP_D), REGSET (D, FP_D),
6705
6706 /* Floating-point half precision registers. */
6707 REGSET (h, FP_H), REGSET (H, FP_H),
6708
6709 /* Floating-point byte precision registers. */
6710 REGSET (b, FP_B), REGSET (B, FP_B),
6711
6712 /* Floating-point quad precision registers. */
6713 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6714
6715 /* FP/SIMD registers. */
6716 REGSET (v, VN), REGSET (V, VN),
6717
6718 /* SVE vector registers. */
6719 REGSET (z, ZN), REGSET (Z, ZN),
6720
6721 /* SVE predicate registers. */
6722 REGSET16 (p, PN), REGSET16 (P, PN)
6723 };
6724
6725 #undef REGDEF
6726 #undef REGNUM
6727 #undef REGSET16
6728 #undef REGSET31
6729 #undef REGSET
6730
6731 #define N 1
6732 #define n 0
6733 #define Z 1
6734 #define z 0
6735 #define C 1
6736 #define c 0
6737 #define V 1
6738 #define v 0
6739 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
6740 static const asm_nzcv nzcv_names[] = {
6741 {"nzcv", B (n, z, c, v)},
6742 {"nzcV", B (n, z, c, V)},
6743 {"nzCv", B (n, z, C, v)},
6744 {"nzCV", B (n, z, C, V)},
6745 {"nZcv", B (n, Z, c, v)},
6746 {"nZcV", B (n, Z, c, V)},
6747 {"nZCv", B (n, Z, C, v)},
6748 {"nZCV", B (n, Z, C, V)},
6749 {"Nzcv", B (N, z, c, v)},
6750 {"NzcV", B (N, z, c, V)},
6751 {"NzCv", B (N, z, C, v)},
6752 {"NzCV", B (N, z, C, V)},
6753 {"NZcv", B (N, Z, c, v)},
6754 {"NZcV", B (N, Z, c, V)},
6755 {"NZCv", B (N, Z, C, v)},
6756 {"NZCV", B (N, Z, C, V)}
6757 };
6758
6759 #undef N
6760 #undef n
6761 #undef Z
6762 #undef z
6763 #undef C
6764 #undef c
6765 #undef V
6766 #undef v
6767 #undef B
6768 \f
6769 /* MD interface: bits in the object file. */
6770
6771 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6772 for use in the a.out file, and stores them in the array pointed to by buf.
6773 This knows about the endian-ness of the target machine and does
6774 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6775 2 (short) and 4 (long) Floating numbers are put out as a series of
6776 LITTLENUMS (shorts, here at least). */
6777
6778 void
6779 md_number_to_chars (char *buf, valueT val, int n)
6780 {
6781 if (target_big_endian)
6782 number_to_chars_bigendian (buf, val, n);
6783 else
6784 number_to_chars_littleendian (buf, val, n);
6785 }
6786
6787 /* MD interface: Sections. */
6788
6789 /* Estimate the size of a frag before relaxing. Assume everything fits in
6790 4 bytes. */
6791
6792 int
6793 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6794 {
6795 fragp->fr_var = 4;
6796 return 4;
6797 }
6798
6799 /* Round up a section size to the appropriate boundary. */
6800
6801 valueT
6802 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6803 {
6804 return size;
6805 }
6806
6807 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6808 of an rs_align_code fragment.
6809
6810 Here we fill the frag with the appropriate info for padding the
6811 output stream. The resulting frag will consist of a fixed (fr_fix)
6812 and of a repeating (fr_var) part.
6813
6814 The fixed content is always emitted before the repeating content and
6815 these two parts are used as follows in constructing the output:
6816 - the fixed part will be used to align to a valid instruction word
6817 boundary, in case that we start at a misaligned address; as no
6818 executable instruction can live at the misaligned location, we
6819 simply fill with zeros;
6820 - the variable part will be used to cover the remaining padding and
6821 we fill using the AArch64 NOP instruction.
6822
6823 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6824 enough storage space for up to 3 bytes for padding the back to a valid
6825 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6826
6827 void
6828 aarch64_handle_align (fragS * fragP)
6829 {
6830 /* NOP = d503201f */
6831 /* AArch64 instructions are always little-endian. */
6832 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6833
6834 int bytes, fix, noop_size;
6835 char *p;
6836
6837 if (fragP->fr_type != rs_align_code)
6838 return;
6839
6840 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6841 p = fragP->fr_literal + fragP->fr_fix;
6842
6843 #ifdef OBJ_ELF
6844 gas_assert (fragP->tc_frag_data.recorded);
6845 #endif
6846
6847 noop_size = sizeof (aarch64_noop);
6848
6849 fix = bytes & (noop_size - 1);
6850 if (fix)
6851 {
6852 #ifdef OBJ_ELF
6853 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6854 #endif
6855 memset (p, 0, fix);
6856 p += fix;
6857 fragP->fr_fix += fix;
6858 }
6859
6860 if (noop_size)
6861 memcpy (p, aarch64_noop, noop_size);
6862 fragP->fr_var = noop_size;
6863 }
6864
6865 /* Perform target specific initialisation of a frag.
6866 Note - despite the name this initialisation is not done when the frag
6867 is created, but only when its type is assigned. A frag can be created
6868 and used a long time before its type is set, so beware of assuming that
6869 this initialisationis performed first. */
6870
6871 #ifndef OBJ_ELF
6872 void
6873 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6874 int max_chars ATTRIBUTE_UNUSED)
6875 {
6876 }
6877
6878 #else /* OBJ_ELF is defined. */
6879 void
6880 aarch64_init_frag (fragS * fragP, int max_chars)
6881 {
6882 /* Record a mapping symbol for alignment frags. We will delete this
6883 later if the alignment ends up empty. */
6884 if (!fragP->tc_frag_data.recorded)
6885 fragP->tc_frag_data.recorded = 1;
6886
6887 switch (fragP->fr_type)
6888 {
6889 case rs_align_test:
6890 case rs_fill:
6891 mapping_state_2 (MAP_DATA, max_chars);
6892 break;
6893 case rs_align:
6894 /* PR 20364: We can get alignment frags in code sections,
6895 so do not just assume that we should use the MAP_DATA state. */
6896 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
6897 break;
6898 case rs_align_code:
6899 mapping_state_2 (MAP_INSN, max_chars);
6900 break;
6901 default:
6902 break;
6903 }
6904 }
6905 \f
6906 /* Initialize the DWARF-2 unwind information for this procedure. */
6907
6908 void
6909 tc_aarch64_frame_initial_instructions (void)
6910 {
6911 cfi_add_CFA_def_cfa (REG_SP, 0);
6912 }
6913 #endif /* OBJ_ELF */
6914
6915 /* Convert REGNAME to a DWARF-2 register number. */
6916
6917 int
6918 tc_aarch64_regname_to_dw2regnum (char *regname)
6919 {
6920 const reg_entry *reg = parse_reg (&regname);
6921 if (reg == NULL)
6922 return -1;
6923
6924 switch (reg->type)
6925 {
6926 case REG_TYPE_SP_32:
6927 case REG_TYPE_SP_64:
6928 case REG_TYPE_R_32:
6929 case REG_TYPE_R_64:
6930 return reg->number;
6931
6932 case REG_TYPE_FP_B:
6933 case REG_TYPE_FP_H:
6934 case REG_TYPE_FP_S:
6935 case REG_TYPE_FP_D:
6936 case REG_TYPE_FP_Q:
6937 return reg->number + 64;
6938
6939 default:
6940 break;
6941 }
6942 return -1;
6943 }
6944
6945 /* Implement DWARF2_ADDR_SIZE. */
6946
6947 int
6948 aarch64_dwarf2_addr_size (void)
6949 {
6950 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6951 if (ilp32_p)
6952 return 4;
6953 #endif
6954 return bfd_arch_bits_per_address (stdoutput) / 8;
6955 }
6956
6957 /* MD interface: Symbol and relocation handling. */
6958
6959 /* Return the address within the segment that a PC-relative fixup is
6960 relative to. For AArch64 PC-relative fixups applied to instructions
6961 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6962
6963 long
6964 md_pcrel_from_section (fixS * fixP, segT seg)
6965 {
6966 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6967
6968 /* If this is pc-relative and we are going to emit a relocation
6969 then we just want to put out any pipeline compensation that the linker
6970 will need. Otherwise we want to use the calculated base. */
6971 if (fixP->fx_pcrel
6972 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6973 || aarch64_force_relocation (fixP)))
6974 base = 0;
6975
6976 /* AArch64 should be consistent for all pc-relative relocations. */
6977 return base + AARCH64_PCREL_OFFSET;
6978 }
6979
6980 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
6981 Otherwise we have no need to default values of symbols. */
6982
6983 symbolS *
6984 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6985 {
6986 #ifdef OBJ_ELF
6987 if (name[0] == '_' && name[1] == 'G'
6988 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6989 {
6990 if (!GOT_symbol)
6991 {
6992 if (symbol_find (name))
6993 as_bad (_("GOT already in the symbol table"));
6994
6995 GOT_symbol = symbol_new (name, undefined_section,
6996 (valueT) 0, &zero_address_frag);
6997 }
6998
6999 return GOT_symbol;
7000 }
7001 #endif
7002
7003 return 0;
7004 }
7005
7006 /* Return non-zero if the indicated VALUE has overflowed the maximum
7007 range expressible by a unsigned number with the indicated number of
7008 BITS. */
7009
7010 static bfd_boolean
7011 unsigned_overflow (valueT value, unsigned bits)
7012 {
7013 valueT lim;
7014 if (bits >= sizeof (valueT) * 8)
7015 return FALSE;
7016 lim = (valueT) 1 << bits;
7017 return (value >= lim);
7018 }
7019
7020
7021 /* Return non-zero if the indicated VALUE has overflowed the maximum
7022 range expressible by an signed number with the indicated number of
7023 BITS. */
7024
7025 static bfd_boolean
7026 signed_overflow (offsetT value, unsigned bits)
7027 {
7028 offsetT lim;
7029 if (bits >= sizeof (offsetT) * 8)
7030 return FALSE;
7031 lim = (offsetT) 1 << (bits - 1);
7032 return (value < -lim || value >= lim);
7033 }
7034
7035 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7036 unsigned immediate offset load/store instruction, try to encode it as
7037 an unscaled, 9-bit, signed immediate offset load/store instruction.
7038 Return TRUE if it is successful; otherwise return FALSE.
7039
7040 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7041 in response to the standard LDR/STR mnemonics when the immediate offset is
7042 unambiguous, i.e. when it is negative or unaligned. */
7043
7044 static bfd_boolean
7045 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7046 {
7047 int idx;
7048 enum aarch64_op new_op;
7049 const aarch64_opcode *new_opcode;
7050
7051 gas_assert (instr->opcode->iclass == ldst_pos);
7052
7053 switch (instr->opcode->op)
7054 {
7055 case OP_LDRB_POS:new_op = OP_LDURB; break;
7056 case OP_STRB_POS: new_op = OP_STURB; break;
7057 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7058 case OP_LDRH_POS: new_op = OP_LDURH; break;
7059 case OP_STRH_POS: new_op = OP_STURH; break;
7060 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7061 case OP_LDR_POS: new_op = OP_LDUR; break;
7062 case OP_STR_POS: new_op = OP_STUR; break;
7063 case OP_LDRF_POS: new_op = OP_LDURV; break;
7064 case OP_STRF_POS: new_op = OP_STURV; break;
7065 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7066 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7067 default: new_op = OP_NIL; break;
7068 }
7069
7070 if (new_op == OP_NIL)
7071 return FALSE;
7072
7073 new_opcode = aarch64_get_opcode (new_op);
7074 gas_assert (new_opcode != NULL);
7075
7076 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7077 instr->opcode->op, new_opcode->op);
7078
7079 aarch64_replace_opcode (instr, new_opcode);
7080
7081 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7082 qualifier matching may fail because the out-of-date qualifier will
7083 prevent the operand being updated with a new and correct qualifier. */
7084 idx = aarch64_operand_index (instr->opcode->operands,
7085 AARCH64_OPND_ADDR_SIMM9);
7086 gas_assert (idx == 1);
7087 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7088
7089 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7090
7091 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
7092 return FALSE;
7093
7094 return TRUE;
7095 }
7096
7097 /* Called by fix_insn to fix a MOV immediate alias instruction.
7098
7099 Operand for a generic move immediate instruction, which is an alias
7100 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7101 a 32-bit/64-bit immediate value into general register. An assembler error
7102 shall result if the immediate cannot be created by a single one of these
7103 instructions. If there is a choice, then to ensure reversability an
7104 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7105
7106 static void
7107 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7108 {
7109 const aarch64_opcode *opcode;
7110
7111 /* Need to check if the destination is SP/ZR. The check has to be done
7112 before any aarch64_replace_opcode. */
7113 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7114 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7115
7116 instr->operands[1].imm.value = value;
7117 instr->operands[1].skip = 0;
7118
7119 if (try_mov_wide_p)
7120 {
7121 /* Try the MOVZ alias. */
7122 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7123 aarch64_replace_opcode (instr, opcode);
7124 if (aarch64_opcode_encode (instr->opcode, instr,
7125 &instr->value, NULL, NULL))
7126 {
7127 put_aarch64_insn (buf, instr->value);
7128 return;
7129 }
7130 /* Try the MOVK alias. */
7131 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7132 aarch64_replace_opcode (instr, opcode);
7133 if (aarch64_opcode_encode (instr->opcode, instr,
7134 &instr->value, NULL, NULL))
7135 {
7136 put_aarch64_insn (buf, instr->value);
7137 return;
7138 }
7139 }
7140
7141 if (try_mov_bitmask_p)
7142 {
7143 /* Try the ORR alias. */
7144 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7145 aarch64_replace_opcode (instr, opcode);
7146 if (aarch64_opcode_encode (instr->opcode, instr,
7147 &instr->value, NULL, NULL))
7148 {
7149 put_aarch64_insn (buf, instr->value);
7150 return;
7151 }
7152 }
7153
7154 as_bad_where (fixP->fx_file, fixP->fx_line,
7155 _("immediate cannot be moved by a single instruction"));
7156 }
7157
7158 /* An instruction operand which is immediate related may have symbol used
7159 in the assembly, e.g.
7160
7161 mov w0, u32
7162 .set u32, 0x00ffff00
7163
7164 At the time when the assembly instruction is parsed, a referenced symbol,
7165 like 'u32' in the above example may not have been seen; a fixS is created
7166 in such a case and is handled here after symbols have been resolved.
7167 Instruction is fixed up with VALUE using the information in *FIXP plus
7168 extra information in FLAGS.
7169
7170 This function is called by md_apply_fix to fix up instructions that need
7171 a fix-up described above but does not involve any linker-time relocation. */
7172
7173 static void
7174 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7175 {
7176 int idx;
7177 uint32_t insn;
7178 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7179 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7180 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7181
7182 if (new_inst)
7183 {
7184 /* Now the instruction is about to be fixed-up, so the operand that
7185 was previously marked as 'ignored' needs to be unmarked in order
7186 to get the encoding done properly. */
7187 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7188 new_inst->operands[idx].skip = 0;
7189 }
7190
7191 gas_assert (opnd != AARCH64_OPND_NIL);
7192
7193 switch (opnd)
7194 {
7195 case AARCH64_OPND_EXCEPTION:
7196 if (unsigned_overflow (value, 16))
7197 as_bad_where (fixP->fx_file, fixP->fx_line,
7198 _("immediate out of range"));
7199 insn = get_aarch64_insn (buf);
7200 insn |= encode_svc_imm (value);
7201 put_aarch64_insn (buf, insn);
7202 break;
7203
7204 case AARCH64_OPND_AIMM:
7205 /* ADD or SUB with immediate.
7206 NOTE this assumes we come here with a add/sub shifted reg encoding
7207 3 322|2222|2 2 2 21111 111111
7208 1 098|7654|3 2 1 09876 543210 98765 43210
7209 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7210 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7211 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7212 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7213 ->
7214 3 322|2222|2 2 221111111111
7215 1 098|7654|3 2 109876543210 98765 43210
7216 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7217 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7218 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7219 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7220 Fields sf Rn Rd are already set. */
7221 insn = get_aarch64_insn (buf);
7222 if (value < 0)
7223 {
7224 /* Add <-> sub. */
7225 insn = reencode_addsub_switch_add_sub (insn);
7226 value = -value;
7227 }
7228
7229 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7230 && unsigned_overflow (value, 12))
7231 {
7232 /* Try to shift the value by 12 to make it fit. */
7233 if (((value >> 12) << 12) == value
7234 && ! unsigned_overflow (value, 12 + 12))
7235 {
7236 value >>= 12;
7237 insn |= encode_addsub_imm_shift_amount (1);
7238 }
7239 }
7240
7241 if (unsigned_overflow (value, 12))
7242 as_bad_where (fixP->fx_file, fixP->fx_line,
7243 _("immediate out of range"));
7244
7245 insn |= encode_addsub_imm (value);
7246
7247 put_aarch64_insn (buf, insn);
7248 break;
7249
7250 case AARCH64_OPND_SIMD_IMM:
7251 case AARCH64_OPND_SIMD_IMM_SFT:
7252 case AARCH64_OPND_LIMM:
7253 /* Bit mask immediate. */
7254 gas_assert (new_inst != NULL);
7255 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7256 new_inst->operands[idx].imm.value = value;
7257 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7258 &new_inst->value, NULL, NULL))
7259 put_aarch64_insn (buf, new_inst->value);
7260 else
7261 as_bad_where (fixP->fx_file, fixP->fx_line,
7262 _("invalid immediate"));
7263 break;
7264
7265 case AARCH64_OPND_HALF:
7266 /* 16-bit unsigned immediate. */
7267 if (unsigned_overflow (value, 16))
7268 as_bad_where (fixP->fx_file, fixP->fx_line,
7269 _("immediate out of range"));
7270 insn = get_aarch64_insn (buf);
7271 insn |= encode_movw_imm (value & 0xffff);
7272 put_aarch64_insn (buf, insn);
7273 break;
7274
7275 case AARCH64_OPND_IMM_MOV:
7276 /* Operand for a generic move immediate instruction, which is
7277 an alias instruction that generates a single MOVZ, MOVN or ORR
7278 instruction to loads a 32-bit/64-bit immediate value into general
7279 register. An assembler error shall result if the immediate cannot be
7280 created by a single one of these instructions. If there is a choice,
7281 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
7282 and MOVZ or MOVN to ORR. */
7283 gas_assert (new_inst != NULL);
7284 fix_mov_imm_insn (fixP, buf, new_inst, value);
7285 break;
7286
7287 case AARCH64_OPND_ADDR_SIMM7:
7288 case AARCH64_OPND_ADDR_SIMM9:
7289 case AARCH64_OPND_ADDR_SIMM9_2:
7290 case AARCH64_OPND_ADDR_UIMM12:
7291 /* Immediate offset in an address. */
7292 insn = get_aarch64_insn (buf);
7293
7294 gas_assert (new_inst != NULL && new_inst->value == insn);
7295 gas_assert (new_inst->opcode->operands[1] == opnd
7296 || new_inst->opcode->operands[2] == opnd);
7297
7298 /* Get the index of the address operand. */
7299 if (new_inst->opcode->operands[1] == opnd)
7300 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7301 idx = 1;
7302 else
7303 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7304 idx = 2;
7305
7306 /* Update the resolved offset value. */
7307 new_inst->operands[idx].addr.offset.imm = value;
7308
7309 /* Encode/fix-up. */
7310 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7311 &new_inst->value, NULL, NULL))
7312 {
7313 put_aarch64_insn (buf, new_inst->value);
7314 break;
7315 }
7316 else if (new_inst->opcode->iclass == ldst_pos
7317 && try_to_encode_as_unscaled_ldst (new_inst))
7318 {
7319 put_aarch64_insn (buf, new_inst->value);
7320 break;
7321 }
7322
7323 as_bad_where (fixP->fx_file, fixP->fx_line,
7324 _("immediate offset out of range"));
7325 break;
7326
7327 default:
7328 gas_assert (0);
7329 as_fatal (_("unhandled operand code %d"), opnd);
7330 }
7331 }
7332
7333 /* Apply a fixup (fixP) to segment data, once it has been determined
7334 by our caller that we have all the info we need to fix it up.
7335
7336 Parameter valP is the pointer to the value of the bits. */
7337
7338 void
7339 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7340 {
7341 offsetT value = *valP;
7342 uint32_t insn;
7343 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7344 int scale;
7345 unsigned flags = fixP->fx_addnumber;
7346
7347 DEBUG_TRACE ("\n\n");
7348 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7349 DEBUG_TRACE ("Enter md_apply_fix");
7350
7351 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7352
7353 /* Note whether this will delete the relocation. */
7354
7355 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7356 fixP->fx_done = 1;
7357
7358 /* Process the relocations. */
7359 switch (fixP->fx_r_type)
7360 {
7361 case BFD_RELOC_NONE:
7362 /* This will need to go in the object file. */
7363 fixP->fx_done = 0;
7364 break;
7365
7366 case BFD_RELOC_8:
7367 case BFD_RELOC_8_PCREL:
7368 if (fixP->fx_done || !seg->use_rela_p)
7369 md_number_to_chars (buf, value, 1);
7370 break;
7371
7372 case BFD_RELOC_16:
7373 case BFD_RELOC_16_PCREL:
7374 if (fixP->fx_done || !seg->use_rela_p)
7375 md_number_to_chars (buf, value, 2);
7376 break;
7377
7378 case BFD_RELOC_32:
7379 case BFD_RELOC_32_PCREL:
7380 if (fixP->fx_done || !seg->use_rela_p)
7381 md_number_to_chars (buf, value, 4);
7382 break;
7383
7384 case BFD_RELOC_64:
7385 case BFD_RELOC_64_PCREL:
7386 if (fixP->fx_done || !seg->use_rela_p)
7387 md_number_to_chars (buf, value, 8);
7388 break;
7389
7390 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7391 /* We claim that these fixups have been processed here, even if
7392 in fact we generate an error because we do not have a reloc
7393 for them, so tc_gen_reloc() will reject them. */
7394 fixP->fx_done = 1;
7395 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7396 {
7397 as_bad_where (fixP->fx_file, fixP->fx_line,
7398 _("undefined symbol %s used as an immediate value"),
7399 S_GET_NAME (fixP->fx_addsy));
7400 goto apply_fix_return;
7401 }
7402 fix_insn (fixP, flags, value);
7403 break;
7404
7405 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7406 if (fixP->fx_done || !seg->use_rela_p)
7407 {
7408 if (value & 3)
7409 as_bad_where (fixP->fx_file, fixP->fx_line,
7410 _("pc-relative load offset not word aligned"));
7411 if (signed_overflow (value, 21))
7412 as_bad_where (fixP->fx_file, fixP->fx_line,
7413 _("pc-relative load offset out of range"));
7414 insn = get_aarch64_insn (buf);
7415 insn |= encode_ld_lit_ofs_19 (value >> 2);
7416 put_aarch64_insn (buf, insn);
7417 }
7418 break;
7419
7420 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7421 if (fixP->fx_done || !seg->use_rela_p)
7422 {
7423 if (signed_overflow (value, 21))
7424 as_bad_where (fixP->fx_file, fixP->fx_line,
7425 _("pc-relative address offset out of range"));
7426 insn = get_aarch64_insn (buf);
7427 insn |= encode_adr_imm (value);
7428 put_aarch64_insn (buf, insn);
7429 }
7430 break;
7431
7432 case BFD_RELOC_AARCH64_BRANCH19:
7433 if (fixP->fx_done || !seg->use_rela_p)
7434 {
7435 if (value & 3)
7436 as_bad_where (fixP->fx_file, fixP->fx_line,
7437 _("conditional branch target not word aligned"));
7438 if (signed_overflow (value, 21))
7439 as_bad_where (fixP->fx_file, fixP->fx_line,
7440 _("conditional branch out of range"));
7441 insn = get_aarch64_insn (buf);
7442 insn |= encode_cond_branch_ofs_19 (value >> 2);
7443 put_aarch64_insn (buf, insn);
7444 }
7445 break;
7446
7447 case BFD_RELOC_AARCH64_TSTBR14:
7448 if (fixP->fx_done || !seg->use_rela_p)
7449 {
7450 if (value & 3)
7451 as_bad_where (fixP->fx_file, fixP->fx_line,
7452 _("conditional branch target not word aligned"));
7453 if (signed_overflow (value, 16))
7454 as_bad_where (fixP->fx_file, fixP->fx_line,
7455 _("conditional branch out of range"));
7456 insn = get_aarch64_insn (buf);
7457 insn |= encode_tst_branch_ofs_14 (value >> 2);
7458 put_aarch64_insn (buf, insn);
7459 }
7460 break;
7461
7462 case BFD_RELOC_AARCH64_CALL26:
7463 case BFD_RELOC_AARCH64_JUMP26:
7464 if (fixP->fx_done || !seg->use_rela_p)
7465 {
7466 if (value & 3)
7467 as_bad_where (fixP->fx_file, fixP->fx_line,
7468 _("branch target not word aligned"));
7469 if (signed_overflow (value, 28))
7470 as_bad_where (fixP->fx_file, fixP->fx_line,
7471 _("branch out of range"));
7472 insn = get_aarch64_insn (buf);
7473 insn |= encode_branch_ofs_26 (value >> 2);
7474 put_aarch64_insn (buf, insn);
7475 }
7476 break;
7477
7478 case BFD_RELOC_AARCH64_MOVW_G0:
7479 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7480 case BFD_RELOC_AARCH64_MOVW_G0_S:
7481 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7482 scale = 0;
7483 goto movw_common;
7484 case BFD_RELOC_AARCH64_MOVW_G1:
7485 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7486 case BFD_RELOC_AARCH64_MOVW_G1_S:
7487 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7488 scale = 16;
7489 goto movw_common;
7490 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7491 scale = 0;
7492 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7493 /* Should always be exported to object file, see
7494 aarch64_force_relocation(). */
7495 gas_assert (!fixP->fx_done);
7496 gas_assert (seg->use_rela_p);
7497 goto movw_common;
7498 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7499 scale = 16;
7500 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7501 /* Should always be exported to object file, see
7502 aarch64_force_relocation(). */
7503 gas_assert (!fixP->fx_done);
7504 gas_assert (seg->use_rela_p);
7505 goto movw_common;
7506 case BFD_RELOC_AARCH64_MOVW_G2:
7507 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7508 case BFD_RELOC_AARCH64_MOVW_G2_S:
7509 scale = 32;
7510 goto movw_common;
7511 case BFD_RELOC_AARCH64_MOVW_G3:
7512 scale = 48;
7513 movw_common:
7514 if (fixP->fx_done || !seg->use_rela_p)
7515 {
7516 insn = get_aarch64_insn (buf);
7517
7518 if (!fixP->fx_done)
7519 {
7520 /* REL signed addend must fit in 16 bits */
7521 if (signed_overflow (value, 16))
7522 as_bad_where (fixP->fx_file, fixP->fx_line,
7523 _("offset out of range"));
7524 }
7525 else
7526 {
7527 /* Check for overflow and scale. */
7528 switch (fixP->fx_r_type)
7529 {
7530 case BFD_RELOC_AARCH64_MOVW_G0:
7531 case BFD_RELOC_AARCH64_MOVW_G1:
7532 case BFD_RELOC_AARCH64_MOVW_G2:
7533 case BFD_RELOC_AARCH64_MOVW_G3:
7534 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7535 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7536 if (unsigned_overflow (value, scale + 16))
7537 as_bad_where (fixP->fx_file, fixP->fx_line,
7538 _("unsigned value out of range"));
7539 break;
7540 case BFD_RELOC_AARCH64_MOVW_G0_S:
7541 case BFD_RELOC_AARCH64_MOVW_G1_S:
7542 case BFD_RELOC_AARCH64_MOVW_G2_S:
7543 /* NOTE: We can only come here with movz or movn. */
7544 if (signed_overflow (value, scale + 16))
7545 as_bad_where (fixP->fx_file, fixP->fx_line,
7546 _("signed value out of range"));
7547 if (value < 0)
7548 {
7549 /* Force use of MOVN. */
7550 value = ~value;
7551 insn = reencode_movzn_to_movn (insn);
7552 }
7553 else
7554 {
7555 /* Force use of MOVZ. */
7556 insn = reencode_movzn_to_movz (insn);
7557 }
7558 break;
7559 default:
7560 /* Unchecked relocations. */
7561 break;
7562 }
7563 value >>= scale;
7564 }
7565
7566 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7567 insn |= encode_movw_imm (value & 0xffff);
7568
7569 put_aarch64_insn (buf, insn);
7570 }
7571 break;
7572
7573 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7574 fixP->fx_r_type = (ilp32_p
7575 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7576 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7577 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7578 /* Should always be exported to object file, see
7579 aarch64_force_relocation(). */
7580 gas_assert (!fixP->fx_done);
7581 gas_assert (seg->use_rela_p);
7582 break;
7583
7584 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7585 fixP->fx_r_type = (ilp32_p
7586 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7587 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7588 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7589 /* Should always be exported to object file, see
7590 aarch64_force_relocation(). */
7591 gas_assert (!fixP->fx_done);
7592 gas_assert (seg->use_rela_p);
7593 break;
7594
7595 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7596 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7597 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7598 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7599 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7600 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7601 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7602 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7603 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7604 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7605 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7606 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7607 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7608 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7609 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7610 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7611 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7612 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7613 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7614 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7615 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7616 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7617 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7618 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7619 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7620 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7621 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7622 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7623 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7624 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7625 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7626 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7627 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7628 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7629 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7630 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7631 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7632 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7633 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7634 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7635 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7636 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7637 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7638 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7639 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7640 /* Should always be exported to object file, see
7641 aarch64_force_relocation(). */
7642 gas_assert (!fixP->fx_done);
7643 gas_assert (seg->use_rela_p);
7644 break;
7645
7646 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7647 /* Should always be exported to object file, see
7648 aarch64_force_relocation(). */
7649 fixP->fx_r_type = (ilp32_p
7650 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7651 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7652 gas_assert (!fixP->fx_done);
7653 gas_assert (seg->use_rela_p);
7654 break;
7655
7656 case BFD_RELOC_AARCH64_ADD_LO12:
7657 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7658 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7659 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7660 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7661 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7662 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7663 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7664 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7665 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7666 case BFD_RELOC_AARCH64_LDST128_LO12:
7667 case BFD_RELOC_AARCH64_LDST16_LO12:
7668 case BFD_RELOC_AARCH64_LDST32_LO12:
7669 case BFD_RELOC_AARCH64_LDST64_LO12:
7670 case BFD_RELOC_AARCH64_LDST8_LO12:
7671 /* Should always be exported to object file, see
7672 aarch64_force_relocation(). */
7673 gas_assert (!fixP->fx_done);
7674 gas_assert (seg->use_rela_p);
7675 break;
7676
7677 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7678 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7679 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7680 break;
7681
7682 case BFD_RELOC_UNUSED:
7683 /* An error will already have been reported. */
7684 break;
7685
7686 default:
7687 as_bad_where (fixP->fx_file, fixP->fx_line,
7688 _("unexpected %s fixup"),
7689 bfd_get_reloc_code_name (fixP->fx_r_type));
7690 break;
7691 }
7692
7693 apply_fix_return:
7694 /* Free the allocated the struct aarch64_inst.
7695 N.B. currently there are very limited number of fix-up types actually use
7696 this field, so the impact on the performance should be minimal . */
7697 if (fixP->tc_fix_data.inst != NULL)
7698 free (fixP->tc_fix_data.inst);
7699
7700 return;
7701 }
7702
7703 /* Translate internal representation of relocation info to BFD target
7704 format. */
7705
7706 arelent *
7707 tc_gen_reloc (asection * section, fixS * fixp)
7708 {
7709 arelent *reloc;
7710 bfd_reloc_code_real_type code;
7711
7712 reloc = XNEW (arelent);
7713
7714 reloc->sym_ptr_ptr = XNEW (asymbol *);
7715 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7716 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7717
7718 if (fixp->fx_pcrel)
7719 {
7720 if (section->use_rela_p)
7721 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7722 else
7723 fixp->fx_offset = reloc->address;
7724 }
7725 reloc->addend = fixp->fx_offset;
7726
7727 code = fixp->fx_r_type;
7728 switch (code)
7729 {
7730 case BFD_RELOC_16:
7731 if (fixp->fx_pcrel)
7732 code = BFD_RELOC_16_PCREL;
7733 break;
7734
7735 case BFD_RELOC_32:
7736 if (fixp->fx_pcrel)
7737 code = BFD_RELOC_32_PCREL;
7738 break;
7739
7740 case BFD_RELOC_64:
7741 if (fixp->fx_pcrel)
7742 code = BFD_RELOC_64_PCREL;
7743 break;
7744
7745 default:
7746 break;
7747 }
7748
7749 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7750 if (reloc->howto == NULL)
7751 {
7752 as_bad_where (fixp->fx_file, fixp->fx_line,
7753 _
7754 ("cannot represent %s relocation in this object file format"),
7755 bfd_get_reloc_code_name (code));
7756 return NULL;
7757 }
7758
7759 return reloc;
7760 }
7761
7762 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7763
7764 void
7765 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7766 {
7767 bfd_reloc_code_real_type type;
7768 int pcrel = 0;
7769
7770 /* Pick a reloc.
7771 FIXME: @@ Should look at CPU word size. */
7772 switch (size)
7773 {
7774 case 1:
7775 type = BFD_RELOC_8;
7776 break;
7777 case 2:
7778 type = BFD_RELOC_16;
7779 break;
7780 case 4:
7781 type = BFD_RELOC_32;
7782 break;
7783 case 8:
7784 type = BFD_RELOC_64;
7785 break;
7786 default:
7787 as_bad (_("cannot do %u-byte relocation"), size);
7788 type = BFD_RELOC_UNUSED;
7789 break;
7790 }
7791
7792 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7793 }
7794
7795 int
7796 aarch64_force_relocation (struct fix *fixp)
7797 {
7798 switch (fixp->fx_r_type)
7799 {
7800 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7801 /* Perform these "immediate" internal relocations
7802 even if the symbol is extern or weak. */
7803 return 0;
7804
7805 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7806 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7807 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7808 /* Pseudo relocs that need to be fixed up according to
7809 ilp32_p. */
7810 return 0;
7811
7812 case BFD_RELOC_AARCH64_ADD_LO12:
7813 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7814 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7815 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7816 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7817 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7818 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7819 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7820 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7821 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7822 case BFD_RELOC_AARCH64_LDST128_LO12:
7823 case BFD_RELOC_AARCH64_LDST16_LO12:
7824 case BFD_RELOC_AARCH64_LDST32_LO12:
7825 case BFD_RELOC_AARCH64_LDST64_LO12:
7826 case BFD_RELOC_AARCH64_LDST8_LO12:
7827 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7828 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7829 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7830 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7831 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7832 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7833 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7834 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7835 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7836 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7837 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7838 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7839 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7840 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7841 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7842 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7843 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7844 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7845 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7846 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7847 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7848 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7849 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7850 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7851 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7852 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7853 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7854 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7855 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7856 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7857 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7858 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7859 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7860 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7861 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7862 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7863 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7864 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7865 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7866 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7867 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7868 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7869 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7870 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7871 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7872 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7873 /* Always leave these relocations for the linker. */
7874 return 1;
7875
7876 default:
7877 break;
7878 }
7879
7880 return generic_force_reloc (fixp);
7881 }
7882
7883 #ifdef OBJ_ELF
7884
7885 const char *
7886 elf64_aarch64_target_format (void)
7887 {
7888 if (strcmp (TARGET_OS, "cloudabi") == 0)
7889 {
7890 /* FIXME: What to do for ilp32_p ? */
7891 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7892 }
7893 if (target_big_endian)
7894 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7895 else
7896 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7897 }
7898
7899 void
7900 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7901 {
7902 elf_frob_symbol (symp, puntp);
7903 }
7904 #endif
7905
7906 /* MD interface: Finalization. */
7907
7908 /* A good place to do this, although this was probably not intended
7909 for this kind of use. We need to dump the literal pool before
7910 references are made to a null symbol pointer. */
7911
7912 void
7913 aarch64_cleanup (void)
7914 {
7915 literal_pool *pool;
7916
7917 for (pool = list_of_pools; pool; pool = pool->next)
7918 {
7919 /* Put it at the end of the relevant section. */
7920 subseg_set (pool->section, pool->sub_section);
7921 s_ltorg (0);
7922 }
7923 }
7924
7925 #ifdef OBJ_ELF
7926 /* Remove any excess mapping symbols generated for alignment frags in
7927 SEC. We may have created a mapping symbol before a zero byte
7928 alignment; remove it if there's a mapping symbol after the
7929 alignment. */
7930 static void
7931 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7932 void *dummy ATTRIBUTE_UNUSED)
7933 {
7934 segment_info_type *seginfo = seg_info (sec);
7935 fragS *fragp;
7936
7937 if (seginfo == NULL || seginfo->frchainP == NULL)
7938 return;
7939
7940 for (fragp = seginfo->frchainP->frch_root;
7941 fragp != NULL; fragp = fragp->fr_next)
7942 {
7943 symbolS *sym = fragp->tc_frag_data.last_map;
7944 fragS *next = fragp->fr_next;
7945
7946 /* Variable-sized frags have been converted to fixed size by
7947 this point. But if this was variable-sized to start with,
7948 there will be a fixed-size frag after it. So don't handle
7949 next == NULL. */
7950 if (sym == NULL || next == NULL)
7951 continue;
7952
7953 if (S_GET_VALUE (sym) < next->fr_address)
7954 /* Not at the end of this frag. */
7955 continue;
7956 know (S_GET_VALUE (sym) == next->fr_address);
7957
7958 do
7959 {
7960 if (next->tc_frag_data.first_map != NULL)
7961 {
7962 /* Next frag starts with a mapping symbol. Discard this
7963 one. */
7964 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7965 break;
7966 }
7967
7968 if (next->fr_next == NULL)
7969 {
7970 /* This mapping symbol is at the end of the section. Discard
7971 it. */
7972 know (next->fr_fix == 0 && next->fr_var == 0);
7973 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7974 break;
7975 }
7976
7977 /* As long as we have empty frags without any mapping symbols,
7978 keep looking. */
7979 /* If the next frag is non-empty and does not start with a
7980 mapping symbol, then this mapping symbol is required. */
7981 if (next->fr_address != next->fr_next->fr_address)
7982 break;
7983
7984 next = next->fr_next;
7985 }
7986 while (next != NULL);
7987 }
7988 }
7989 #endif
7990
7991 /* Adjust the symbol table. */
7992
7993 void
7994 aarch64_adjust_symtab (void)
7995 {
7996 #ifdef OBJ_ELF
7997 /* Remove any overlapping mapping symbols generated by alignment frags. */
7998 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7999 /* Now do generic ELF adjustments. */
8000 elf_adjust_symtab ();
8001 #endif
8002 }
8003
8004 static void
8005 checked_hash_insert (struct hash_control *table, const char *key, void *value)
8006 {
8007 const char *hash_err;
8008
8009 hash_err = hash_insert (table, key, value);
8010 if (hash_err)
8011 printf ("Internal Error: Can't hash %s\n", key);
8012 }
8013
8014 static void
8015 fill_instruction_hash_table (void)
8016 {
8017 aarch64_opcode *opcode = aarch64_opcode_table;
8018
8019 while (opcode->name != NULL)
8020 {
8021 templates *templ, *new_templ;
8022 templ = hash_find (aarch64_ops_hsh, opcode->name);
8023
8024 new_templ = XNEW (templates);
8025 new_templ->opcode = opcode;
8026 new_templ->next = NULL;
8027
8028 if (!templ)
8029 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8030 else
8031 {
8032 new_templ->next = templ->next;
8033 templ->next = new_templ;
8034 }
8035 ++opcode;
8036 }
8037 }
8038
8039 static inline void
8040 convert_to_upper (char *dst, const char *src, size_t num)
8041 {
8042 unsigned int i;
8043 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8044 *dst = TOUPPER (*src);
8045 *dst = '\0';
8046 }
8047
8048 /* Assume STR point to a lower-case string, allocate, convert and return
8049 the corresponding upper-case string. */
8050 static inline const char*
8051 get_upper_str (const char *str)
8052 {
8053 char *ret;
8054 size_t len = strlen (str);
8055 ret = XNEWVEC (char, len + 1);
8056 convert_to_upper (ret, str, len);
8057 return ret;
8058 }
8059
8060 /* MD interface: Initialization. */
8061
8062 void
8063 md_begin (void)
8064 {
8065 unsigned mach;
8066 unsigned int i;
8067
8068 if ((aarch64_ops_hsh = hash_new ()) == NULL
8069 || (aarch64_cond_hsh = hash_new ()) == NULL
8070 || (aarch64_shift_hsh = hash_new ()) == NULL
8071 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8072 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8073 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8074 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8075 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8076 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8077 || (aarch64_reg_hsh = hash_new ()) == NULL
8078 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8079 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8080 || (aarch64_pldop_hsh = hash_new ()) == NULL
8081 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8082 as_fatal (_("virtual memory exhausted"));
8083
8084 fill_instruction_hash_table ();
8085
8086 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8087 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8088 (void *) (aarch64_sys_regs + i));
8089
8090 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8091 checked_hash_insert (aarch64_pstatefield_hsh,
8092 aarch64_pstatefields[i].name,
8093 (void *) (aarch64_pstatefields + i));
8094
8095 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8096 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8097 aarch64_sys_regs_ic[i].name,
8098 (void *) (aarch64_sys_regs_ic + i));
8099
8100 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8101 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8102 aarch64_sys_regs_dc[i].name,
8103 (void *) (aarch64_sys_regs_dc + i));
8104
8105 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8106 checked_hash_insert (aarch64_sys_regs_at_hsh,
8107 aarch64_sys_regs_at[i].name,
8108 (void *) (aarch64_sys_regs_at + i));
8109
8110 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8111 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8112 aarch64_sys_regs_tlbi[i].name,
8113 (void *) (aarch64_sys_regs_tlbi + i));
8114
8115 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8116 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8117 (void *) (reg_names + i));
8118
8119 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8120 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8121 (void *) (nzcv_names + i));
8122
8123 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8124 {
8125 const char *name = aarch64_operand_modifiers[i].name;
8126 checked_hash_insert (aarch64_shift_hsh, name,
8127 (void *) (aarch64_operand_modifiers + i));
8128 /* Also hash the name in the upper case. */
8129 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8130 (void *) (aarch64_operand_modifiers + i));
8131 }
8132
8133 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8134 {
8135 unsigned int j;
8136 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8137 the same condition code. */
8138 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8139 {
8140 const char *name = aarch64_conds[i].names[j];
8141 if (name == NULL)
8142 break;
8143 checked_hash_insert (aarch64_cond_hsh, name,
8144 (void *) (aarch64_conds + i));
8145 /* Also hash the name in the upper case. */
8146 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8147 (void *) (aarch64_conds + i));
8148 }
8149 }
8150
8151 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8152 {
8153 const char *name = aarch64_barrier_options[i].name;
8154 /* Skip xx00 - the unallocated values of option. */
8155 if ((i & 0x3) == 0)
8156 continue;
8157 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8158 (void *) (aarch64_barrier_options + i));
8159 /* Also hash the name in the upper case. */
8160 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8161 (void *) (aarch64_barrier_options + i));
8162 }
8163
8164 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8165 {
8166 const char* name = aarch64_prfops[i].name;
8167 /* Skip the unallocated hint encodings. */
8168 if (name == NULL)
8169 continue;
8170 checked_hash_insert (aarch64_pldop_hsh, name,
8171 (void *) (aarch64_prfops + i));
8172 /* Also hash the name in the upper case. */
8173 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8174 (void *) (aarch64_prfops + i));
8175 }
8176
8177 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8178 {
8179 const char* name = aarch64_hint_options[i].name;
8180
8181 checked_hash_insert (aarch64_hint_opt_hsh, name,
8182 (void *) (aarch64_hint_options + i));
8183 /* Also hash the name in the upper case. */
8184 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8185 (void *) (aarch64_hint_options + i));
8186 }
8187
8188 /* Set the cpu variant based on the command-line options. */
8189 if (!mcpu_cpu_opt)
8190 mcpu_cpu_opt = march_cpu_opt;
8191
8192 if (!mcpu_cpu_opt)
8193 mcpu_cpu_opt = &cpu_default;
8194
8195 cpu_variant = *mcpu_cpu_opt;
8196
8197 /* Record the CPU type. */
8198 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8199
8200 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8201 }
8202
8203 /* Command line processing. */
8204
8205 const char *md_shortopts = "m:";
8206
8207 #ifdef AARCH64_BI_ENDIAN
8208 #define OPTION_EB (OPTION_MD_BASE + 0)
8209 #define OPTION_EL (OPTION_MD_BASE + 1)
8210 #else
8211 #if TARGET_BYTES_BIG_ENDIAN
8212 #define OPTION_EB (OPTION_MD_BASE + 0)
8213 #else
8214 #define OPTION_EL (OPTION_MD_BASE + 1)
8215 #endif
8216 #endif
8217
8218 struct option md_longopts[] = {
8219 #ifdef OPTION_EB
8220 {"EB", no_argument, NULL, OPTION_EB},
8221 #endif
8222 #ifdef OPTION_EL
8223 {"EL", no_argument, NULL, OPTION_EL},
8224 #endif
8225 {NULL, no_argument, NULL, 0}
8226 };
8227
8228 size_t md_longopts_size = sizeof (md_longopts);
8229
8230 struct aarch64_option_table
8231 {
8232 const char *option; /* Option name to match. */
8233 const char *help; /* Help information. */
8234 int *var; /* Variable to change. */
8235 int value; /* What to change it to. */
8236 char *deprecated; /* If non-null, print this message. */
8237 };
8238
8239 static struct aarch64_option_table aarch64_opts[] = {
8240 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8241 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8242 NULL},
8243 #ifdef DEBUG_AARCH64
8244 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8245 #endif /* DEBUG_AARCH64 */
8246 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8247 NULL},
8248 {"mno-verbose-error", N_("do not output verbose error messages"),
8249 &verbose_error_p, 0, NULL},
8250 {NULL, NULL, NULL, 0, NULL}
8251 };
8252
8253 struct aarch64_cpu_option_table
8254 {
8255 const char *name;
8256 const aarch64_feature_set value;
8257 /* The canonical name of the CPU, or NULL to use NAME converted to upper
8258 case. */
8259 const char *canonical_name;
8260 };
8261
8262 /* This list should, at a minimum, contain all the cpu names
8263 recognized by GCC. */
8264 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8265 {"all", AARCH64_ANY, NULL},
8266 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8267 AARCH64_FEATURE_CRC), "Cortex-A35"},
8268 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8269 AARCH64_FEATURE_CRC), "Cortex-A53"},
8270 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8271 AARCH64_FEATURE_CRC), "Cortex-A57"},
8272 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8273 AARCH64_FEATURE_CRC), "Cortex-A72"},
8274 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8275 AARCH64_FEATURE_CRC), "Cortex-A73"},
8276 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8277 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8278 "Samsung Exynos M1"},
8279 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8280 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8281 "Qualcomm QDF24XX"},
8282 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8283 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8284 "Cavium ThunderX"},
8285 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
8286 AARCH64_FEATURE_CRYPTO),
8287 "Broadcom Vulcan"},
8288 /* The 'xgene-1' name is an older name for 'xgene1', which was used
8289 in earlier releases and is superseded by 'xgene1' in all
8290 tools. */
8291 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8292 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8293 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
8294 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
8295 {"generic", AARCH64_ARCH_V8, NULL},
8296
8297 {NULL, AARCH64_ARCH_NONE, NULL}
8298 };
8299
8300 struct aarch64_arch_option_table
8301 {
8302 const char *name;
8303 const aarch64_feature_set value;
8304 };
8305
8306 /* This list should, at a minimum, contain all the architecture names
8307 recognized by GCC. */
8308 static const struct aarch64_arch_option_table aarch64_archs[] = {
8309 {"all", AARCH64_ANY},
8310 {"armv8-a", AARCH64_ARCH_V8},
8311 {"armv8.1-a", AARCH64_ARCH_V8_1},
8312 {"armv8.2-a", AARCH64_ARCH_V8_2},
8313 {NULL, AARCH64_ARCH_NONE}
8314 };
8315
8316 /* ISA extensions. */
8317 struct aarch64_option_cpu_value_table
8318 {
8319 const char *name;
8320 const aarch64_feature_set value;
8321 const aarch64_feature_set require; /* Feature dependencies. */
8322 };
8323
8324 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
8325 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
8326 AARCH64_ARCH_NONE},
8327 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
8328 AARCH64_ARCH_NONE},
8329 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
8330 AARCH64_ARCH_NONE},
8331 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
8332 AARCH64_ARCH_NONE},
8333 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
8334 AARCH64_ARCH_NONE},
8335 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
8336 AARCH64_ARCH_NONE},
8337 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
8338 AARCH64_ARCH_NONE},
8339 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
8340 AARCH64_ARCH_NONE},
8341 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
8342 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8343 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
8344 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8345 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
8346 AARCH64_ARCH_NONE},
8347 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
8348 };
8349
8350 struct aarch64_long_option_table
8351 {
8352 const char *option; /* Substring to match. */
8353 const char *help; /* Help information. */
8354 int (*func) (const char *subopt); /* Function to decode sub-option. */
8355 char *deprecated; /* If non-null, print this message. */
8356 };
8357
8358 /* Transitive closure of features depending on set. */
8359 static aarch64_feature_set
8360 aarch64_feature_disable_set (aarch64_feature_set set)
8361 {
8362 const struct aarch64_option_cpu_value_table *opt;
8363 aarch64_feature_set prev = 0;
8364
8365 while (prev != set) {
8366 prev = set;
8367 for (opt = aarch64_features; opt->name != NULL; opt++)
8368 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8369 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8370 }
8371 return set;
8372 }
8373
8374 /* Transitive closure of dependencies of set. */
8375 static aarch64_feature_set
8376 aarch64_feature_enable_set (aarch64_feature_set set)
8377 {
8378 const struct aarch64_option_cpu_value_table *opt;
8379 aarch64_feature_set prev = 0;
8380
8381 while (prev != set) {
8382 prev = set;
8383 for (opt = aarch64_features; opt->name != NULL; opt++)
8384 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8385 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8386 }
8387 return set;
8388 }
8389
8390 static int
8391 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8392 bfd_boolean ext_only)
8393 {
8394 /* We insist on extensions being added before being removed. We achieve
8395 this by using the ADDING_VALUE variable to indicate whether we are
8396 adding an extension (1) or removing it (0) and only allowing it to
8397 change in the order -1 -> 1 -> 0. */
8398 int adding_value = -1;
8399 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8400
8401 /* Copy the feature set, so that we can modify it. */
8402 *ext_set = **opt_p;
8403 *opt_p = ext_set;
8404
8405 while (str != NULL && *str != 0)
8406 {
8407 const struct aarch64_option_cpu_value_table *opt;
8408 const char *ext = NULL;
8409 int optlen;
8410
8411 if (!ext_only)
8412 {
8413 if (*str != '+')
8414 {
8415 as_bad (_("invalid architectural extension"));
8416 return 0;
8417 }
8418
8419 ext = strchr (++str, '+');
8420 }
8421
8422 if (ext != NULL)
8423 optlen = ext - str;
8424 else
8425 optlen = strlen (str);
8426
8427 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8428 {
8429 if (adding_value != 0)
8430 adding_value = 0;
8431 optlen -= 2;
8432 str += 2;
8433 }
8434 else if (optlen > 0)
8435 {
8436 if (adding_value == -1)
8437 adding_value = 1;
8438 else if (adding_value != 1)
8439 {
8440 as_bad (_("must specify extensions to add before specifying "
8441 "those to remove"));
8442 return FALSE;
8443 }
8444 }
8445
8446 if (optlen == 0)
8447 {
8448 as_bad (_("missing architectural extension"));
8449 return 0;
8450 }
8451
8452 gas_assert (adding_value != -1);
8453
8454 for (opt = aarch64_features; opt->name != NULL; opt++)
8455 if (strncmp (opt->name, str, optlen) == 0)
8456 {
8457 aarch64_feature_set set;
8458
8459 /* Add or remove the extension. */
8460 if (adding_value)
8461 {
8462 set = aarch64_feature_enable_set (opt->value);
8463 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8464 }
8465 else
8466 {
8467 set = aarch64_feature_disable_set (opt->value);
8468 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8469 }
8470 break;
8471 }
8472
8473 if (opt->name == NULL)
8474 {
8475 as_bad (_("unknown architectural extension `%s'"), str);
8476 return 0;
8477 }
8478
8479 str = ext;
8480 };
8481
8482 return 1;
8483 }
8484
8485 static int
8486 aarch64_parse_cpu (const char *str)
8487 {
8488 const struct aarch64_cpu_option_table *opt;
8489 const char *ext = strchr (str, '+');
8490 size_t optlen;
8491
8492 if (ext != NULL)
8493 optlen = ext - str;
8494 else
8495 optlen = strlen (str);
8496
8497 if (optlen == 0)
8498 {
8499 as_bad (_("missing cpu name `%s'"), str);
8500 return 0;
8501 }
8502
8503 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8504 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8505 {
8506 mcpu_cpu_opt = &opt->value;
8507 if (ext != NULL)
8508 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8509
8510 return 1;
8511 }
8512
8513 as_bad (_("unknown cpu `%s'"), str);
8514 return 0;
8515 }
8516
8517 static int
8518 aarch64_parse_arch (const char *str)
8519 {
8520 const struct aarch64_arch_option_table *opt;
8521 const char *ext = strchr (str, '+');
8522 size_t optlen;
8523
8524 if (ext != NULL)
8525 optlen = ext - str;
8526 else
8527 optlen = strlen (str);
8528
8529 if (optlen == 0)
8530 {
8531 as_bad (_("missing architecture name `%s'"), str);
8532 return 0;
8533 }
8534
8535 for (opt = aarch64_archs; opt->name != NULL; opt++)
8536 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8537 {
8538 march_cpu_opt = &opt->value;
8539 if (ext != NULL)
8540 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8541
8542 return 1;
8543 }
8544
8545 as_bad (_("unknown architecture `%s'\n"), str);
8546 return 0;
8547 }
8548
8549 /* ABIs. */
8550 struct aarch64_option_abi_value_table
8551 {
8552 const char *name;
8553 enum aarch64_abi_type value;
8554 };
8555
8556 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
8557 {"ilp32", AARCH64_ABI_ILP32},
8558 {"lp64", AARCH64_ABI_LP64},
8559 };
8560
8561 static int
8562 aarch64_parse_abi (const char *str)
8563 {
8564 unsigned int i;
8565
8566 if (str[0] == '\0')
8567 {
8568 as_bad (_("missing abi name `%s'"), str);
8569 return 0;
8570 }
8571
8572 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8573 if (strcmp (str, aarch64_abis[i].name) == 0)
8574 {
8575 aarch64_abi = aarch64_abis[i].value;
8576 return 1;
8577 }
8578
8579 as_bad (_("unknown abi `%s'\n"), str);
8580 return 0;
8581 }
8582
8583 static struct aarch64_long_option_table aarch64_long_opts[] = {
8584 #ifdef OBJ_ELF
8585 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
8586 aarch64_parse_abi, NULL},
8587 #endif /* OBJ_ELF */
8588 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
8589 aarch64_parse_cpu, NULL},
8590 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
8591 aarch64_parse_arch, NULL},
8592 {NULL, NULL, 0, NULL}
8593 };
8594
8595 int
8596 md_parse_option (int c, const char *arg)
8597 {
8598 struct aarch64_option_table *opt;
8599 struct aarch64_long_option_table *lopt;
8600
8601 switch (c)
8602 {
8603 #ifdef OPTION_EB
8604 case OPTION_EB:
8605 target_big_endian = 1;
8606 break;
8607 #endif
8608
8609 #ifdef OPTION_EL
8610 case OPTION_EL:
8611 target_big_endian = 0;
8612 break;
8613 #endif
8614
8615 case 'a':
8616 /* Listing option. Just ignore these, we don't support additional
8617 ones. */
8618 return 0;
8619
8620 default:
8621 for (opt = aarch64_opts; opt->option != NULL; opt++)
8622 {
8623 if (c == opt->option[0]
8624 && ((arg == NULL && opt->option[1] == 0)
8625 || streq (arg, opt->option + 1)))
8626 {
8627 /* If the option is deprecated, tell the user. */
8628 if (opt->deprecated != NULL)
8629 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8630 arg ? arg : "", _(opt->deprecated));
8631
8632 if (opt->var != NULL)
8633 *opt->var = opt->value;
8634
8635 return 1;
8636 }
8637 }
8638
8639 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8640 {
8641 /* These options are expected to have an argument. */
8642 if (c == lopt->option[0]
8643 && arg != NULL
8644 && strncmp (arg, lopt->option + 1,
8645 strlen (lopt->option + 1)) == 0)
8646 {
8647 /* If the option is deprecated, tell the user. */
8648 if (lopt->deprecated != NULL)
8649 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8650 _(lopt->deprecated));
8651
8652 /* Call the sup-option parser. */
8653 return lopt->func (arg + strlen (lopt->option) - 1);
8654 }
8655 }
8656
8657 return 0;
8658 }
8659
8660 return 1;
8661 }
8662
8663 void
8664 md_show_usage (FILE * fp)
8665 {
8666 struct aarch64_option_table *opt;
8667 struct aarch64_long_option_table *lopt;
8668
8669 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8670
8671 for (opt = aarch64_opts; opt->option != NULL; opt++)
8672 if (opt->help != NULL)
8673 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8674
8675 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8676 if (lopt->help != NULL)
8677 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8678
8679 #ifdef OPTION_EB
8680 fprintf (fp, _("\
8681 -EB assemble code for a big-endian cpu\n"));
8682 #endif
8683
8684 #ifdef OPTION_EL
8685 fprintf (fp, _("\
8686 -EL assemble code for a little-endian cpu\n"));
8687 #endif
8688 }
8689
8690 /* Parse a .cpu directive. */
8691
8692 static void
8693 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8694 {
8695 const struct aarch64_cpu_option_table *opt;
8696 char saved_char;
8697 char *name;
8698 char *ext;
8699 size_t optlen;
8700
8701 name = input_line_pointer;
8702 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8703 input_line_pointer++;
8704 saved_char = *input_line_pointer;
8705 *input_line_pointer = 0;
8706
8707 ext = strchr (name, '+');
8708
8709 if (ext != NULL)
8710 optlen = ext - name;
8711 else
8712 optlen = strlen (name);
8713
8714 /* Skip the first "all" entry. */
8715 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8716 if (strlen (opt->name) == optlen
8717 && strncmp (name, opt->name, optlen) == 0)
8718 {
8719 mcpu_cpu_opt = &opt->value;
8720 if (ext != NULL)
8721 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8722 return;
8723
8724 cpu_variant = *mcpu_cpu_opt;
8725
8726 *input_line_pointer = saved_char;
8727 demand_empty_rest_of_line ();
8728 return;
8729 }
8730 as_bad (_("unknown cpu `%s'"), name);
8731 *input_line_pointer = saved_char;
8732 ignore_rest_of_line ();
8733 }
8734
8735
8736 /* Parse a .arch directive. */
8737
8738 static void
8739 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8740 {
8741 const struct aarch64_arch_option_table *opt;
8742 char saved_char;
8743 char *name;
8744 char *ext;
8745 size_t optlen;
8746
8747 name = input_line_pointer;
8748 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8749 input_line_pointer++;
8750 saved_char = *input_line_pointer;
8751 *input_line_pointer = 0;
8752
8753 ext = strchr (name, '+');
8754
8755 if (ext != NULL)
8756 optlen = ext - name;
8757 else
8758 optlen = strlen (name);
8759
8760 /* Skip the first "all" entry. */
8761 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8762 if (strlen (opt->name) == optlen
8763 && strncmp (name, opt->name, optlen) == 0)
8764 {
8765 mcpu_cpu_opt = &opt->value;
8766 if (ext != NULL)
8767 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8768 return;
8769
8770 cpu_variant = *mcpu_cpu_opt;
8771
8772 *input_line_pointer = saved_char;
8773 demand_empty_rest_of_line ();
8774 return;
8775 }
8776
8777 as_bad (_("unknown architecture `%s'\n"), name);
8778 *input_line_pointer = saved_char;
8779 ignore_rest_of_line ();
8780 }
8781
8782 /* Parse a .arch_extension directive. */
8783
8784 static void
8785 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8786 {
8787 char saved_char;
8788 char *ext = input_line_pointer;;
8789
8790 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8791 input_line_pointer++;
8792 saved_char = *input_line_pointer;
8793 *input_line_pointer = 0;
8794
8795 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8796 return;
8797
8798 cpu_variant = *mcpu_cpu_opt;
8799
8800 *input_line_pointer = saved_char;
8801 demand_empty_rest_of_line ();
8802 }
8803
8804 /* Copy symbol information. */
8805
8806 void
8807 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8808 {
8809 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8810 }