[AArch64][SVE 09/32] Improve error messages for invalid floats
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum vector_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in vector_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct vector_type_el
94 {
95 enum vector_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
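/* Illustrative sketch (not part of the original source): a typical operand
   parser records at most one cheap error per failed template, e.g.

     if (reg == PARSE_FAIL)
       {
	 set_default_error ();
	 return PARSE_FAIL;
       }

   so that, when several templates share a mnemonic (FMOV has 8), the most
   appropriate saved error can be picked up and reported by as_bad once the
   whole assembly line has been processed.  */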
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Macros to define the register types and masks for the purpose
251 of parsing. */
252
253 #undef AARCH64_REG_TYPES
254 #define AARCH64_REG_TYPES \
255 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
256 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
257 BASIC_REG_TYPE(SP_32) /* wsp */ \
258 BASIC_REG_TYPE(SP_64) /* sp */ \
259 BASIC_REG_TYPE(Z_32) /* wzr */ \
260 BASIC_REG_TYPE(Z_64) /* xzr */ \
261 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
262 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
263 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
264 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
265 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
266 BASIC_REG_TYPE(CN) /* c[0-7] */ \
267 BASIC_REG_TYPE(VN) /* v[0-31] */ \
268 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
269 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
270 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
271 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
272 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
273 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
274 /* Typecheck: any [BHSDQ]P FP. */ \
275 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
276 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
277 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
278 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
279 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
280 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
281 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
282 /* Any integer register; used for error messages only. */ \
283 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
284 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
285 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
286 /* Pseudo type to mark the end of the enumerator sequence. */ \
287 BASIC_REG_TYPE(MAX)
288
289 #undef BASIC_REG_TYPE
290 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
291 #undef MULTI_REG_TYPE
292 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
293
294 /* Register type enumerators. */
295 typedef enum aarch64_reg_type_
296 {
297 /* A list of REG_TYPE_*. */
298 AARCH64_REG_TYPES
299 } aarch64_reg_type;
300
301 #undef BASIC_REG_TYPE
302 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
303 #undef REG_TYPE
304 #define REG_TYPE(T) (1 << REG_TYPE_##T)
305 #undef MULTI_REG_TYPE
306 #define MULTI_REG_TYPE(T,V) V,
307
308 /* Structure for a hash table entry for a register. */
309 typedef struct
310 {
311 const char *name;
312 unsigned char number;
313 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
314 unsigned char builtin;
315 } reg_entry;
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
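/* For example, the MULTI_REG_TYPE entry R64_SP expands here to
   REG_TYPE(R_64) | REG_TYPE(SP_64), i.e. reg_type_masks[REG_TYPE_R64_SP]
   == (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64), while each
   BASIC_REG_TYPE entry expands to just its own bit.  */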
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409 static struct hash_control *aarch64_hint_opt_hsh;
410
411 /* Stuff needed to resolve the label ambiguity
412 As:
413 ...
414 label: <insn>
415 may differ from:
416 ...
417 label:
418 <insn> */
419
420 static symbolS *last_label_seen;
421
422 /* Literal pool structure. Held on a per-section
423 and per-sub-section basis. */
424
425 #define MAX_LITERAL_POOL_SIZE 1024
426 typedef struct literal_expression
427 {
428 expressionS exp;
429 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
430 LITTLENUM_TYPE * bignum;
431 } literal_expression;
432
433 typedef struct literal_pool
434 {
435 literal_expression literals[MAX_LITERAL_POOL_SIZE];
436 unsigned int next_free_entry;
437 unsigned int id;
438 symbolS *symbol;
439 segT section;
440 subsegT sub_section;
441 int size;
442 struct literal_pool *next;
443 } literal_pool;
444
445 /* Pointer to a linked list of literal pools. */
446 static literal_pool *list_of_pools = NULL;
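/* Illustrative note: these pools back constants such as the one in
   "ldr x0, =0x11223344" (see the gen_lit_pool flag above).  add_to_lit_pool
   below records the constant and rewrites the operand as a reference to the
   pool's symbol; a later .ltorg (or .pool) directive emits the accumulated
   entries with emit_expr.  */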
447 \f
448 /* Pure syntax. */
449
450 /* This array holds the chars that always start a comment. If the
451 pre-processor is disabled, these aren't very useful. */
452 const char comment_chars[] = "";
453
454 /* This array holds the chars that only start a comment at the beginning of
455 a line. If the line seems to have the form '# 123 filename'
456 .line and .file directives will appear in the pre-processed output. */
457 /* Note that input_file.c hand checks for '#' at the beginning of the
458 first line of the input file. This is because the compiler outputs
459 #NO_APP at the beginning of its output. */
460 /* Also note that comments like this one will always work. */
461 const char line_comment_chars[] = "#";
462
463 const char line_separator_chars[] = ";";
464
465 /* Chars that can be used to separate the mantissa
466 from the exponent in floating point numbers. */
467 const char EXP_CHARS[] = "eE";
468
469 /* Chars that mean this number is a floating point constant. */
470 /* As in 0f12.456 */
471 /* or 0d1.2345e12 */
472
473 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
474
475 /* Prefix character that indicates the start of an immediate value. */
476 #define is_immediate_prefix(C) ((C) == '#')
477
478 /* Separator character handling. */
479
480 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
481
482 static inline bfd_boolean
483 skip_past_char (char **str, char c)
484 {
485 if (**str == c)
486 {
487 (*str)++;
488 return TRUE;
489 }
490 else
491 return FALSE;
492 }
493
494 #define skip_past_comma(str) skip_past_char (str, ',')
495
496 /* Arithmetic expressions (possibly involving symbols). */
497
498 static bfd_boolean in_my_get_expression_p = FALSE;
499
500 /* Third argument to my_get_expression. */
501 #define GE_NO_PREFIX 0
502 #define GE_OPT_PREFIX 1
503
504 /* Return TRUE if the string pointed to by *STR is successfully parsed
505 as a valid expression; *EP will be filled with the information of
506 such an expression. Otherwise return FALSE. */
507
508 static bfd_boolean
509 my_get_expression (expressionS * ep, char **str, int prefix_mode,
510 int reject_absent)
511 {
512 char *save_in;
513 segT seg;
514 int prefix_present_p = 0;
515
516 switch (prefix_mode)
517 {
518 case GE_NO_PREFIX:
519 break;
520 case GE_OPT_PREFIX:
521 if (is_immediate_prefix (**str))
522 {
523 (*str)++;
524 prefix_present_p = 1;
525 }
526 break;
527 default:
528 abort ();
529 }
530
531 memset (ep, 0, sizeof (expressionS));
532
533 save_in = input_line_pointer;
534 input_line_pointer = *str;
535 in_my_get_expression_p = TRUE;
536 seg = expression (ep);
537 in_my_get_expression_p = FALSE;
538
539 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
540 {
541 /* We found a bad expression in md_operand(). */
542 *str = input_line_pointer;
543 input_line_pointer = save_in;
544 if (prefix_present_p && ! error_p ())
545 set_fatal_syntax_error (_("bad expression"));
546 else
547 set_first_syntax_error (_("bad expression"));
548 return FALSE;
549 }
550
551 #ifdef OBJ_AOUT
552 if (seg != absolute_section
553 && seg != text_section
554 && seg != data_section
555 && seg != bss_section && seg != undefined_section)
556 {
557 set_syntax_error (_("bad segment"));
558 *str = input_line_pointer;
559 input_line_pointer = save_in;
560 return FALSE;
561 }
562 #else
563 (void) seg;
564 #endif
565
566 *str = input_line_pointer;
567 input_line_pointer = save_in;
568 return TRUE;
569 }
570
571 /* Turn a string in input_line_pointer into a floating point constant
572 of type TYPE, and store the appropriate bytes in *LITP. The number
573 of LITTLENUMS emitted is stored in *SIZEP. An error message is
574 returned, or NULL on OK. */
575
576 const char *
577 md_atof (int type, char *litP, int *sizeP)
578 {
579 return ieee_md_atof (type, litP, sizeP, target_big_endian);
580 }
581
582 /* We handle all bad expressions here, so that we can report the faulty
583 instruction in the error message. */
584 void
585 md_operand (expressionS * exp)
586 {
587 if (in_my_get_expression_p)
588 exp->X_op = O_illegal;
589 }
590
591 /* Immediate values. */
592
593 /* Errors may be set multiple times during parsing or bit encoding
594 (particularly in the Neon bits), but usually the earliest error which is set
595 will be the most meaningful. Avoid overwriting it with later (cascading)
596 errors by calling this function. */
597
598 static void
599 first_error (const char *error)
600 {
601 if (! error_p ())
602 set_syntax_error (error);
603 }
604
605 /* Similar to first_error, but this function accepts a formatted error
606 message. */
607 static void
608 first_error_fmt (const char *format, ...)
609 {
610 va_list args;
611 enum
612 { size = 100 };
613 /* N.B. this single buffer will not cause error messages for different
614 instructions to pollute each other; this is because at the end of
615 processing of each assembly line, the error message, if any, will be
616 collected by as_bad. */
617 static char buffer[size];
618
619 if (! error_p ())
620 {
621 int ret ATTRIBUTE_UNUSED;
622 va_start (args, format);
623 ret = vsnprintf (buffer, size, format, args);
624 know (ret <= size - 1 && ret >= 0);
625 va_end (args);
626 set_syntax_error (buffer);
627 }
628 }
629
630 /* Register parsing. */
631
632 /* Generic register parser which is called by other specialized
633 register parsers.
634 CCP points to what should be the beginning of a register name.
635 If it is indeed a valid register name, advance CCP over it and
636 return the reg_entry structure; otherwise return NULL.
637 It does not issue diagnostics. */
638
639 static reg_entry *
640 parse_reg (char **ccp)
641 {
642 char *start = *ccp;
643 char *p;
644 reg_entry *reg;
645
646 #ifdef REGISTER_PREFIX
647 if (*start != REGISTER_PREFIX)
648 return NULL;
649 start++;
650 #endif
651
652 p = start;
653 if (!ISALPHA (*p) || !is_name_beginner (*p))
654 return NULL;
655
656 do
657 p++;
658 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
659
660 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
661
662 if (!reg)
663 return NULL;
664
665 *ccp = p;
666 return reg;
667 }
668
669 /* Return TRUE if REG->TYPE is compatible with the required register type
670 TYPE; otherwise return FALSE. */
671 static bfd_boolean
672 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
673 {
674 if (reg->type == type)
675 return TRUE;
676
677 switch (type)
678 {
679 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
680 case REG_TYPE_R_Z_SP: /* Integer reg (inc {W}SP inc [WX]ZR). */
681 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
682 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
683 case REG_TYPE_VN: /* Vector register. */
684 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
685 return ((reg_type_masks[reg->type] & reg_type_masks[type])
686 == reg_type_masks[reg->type]);
687 default:
688 as_fatal ("unhandled type %d", type);
689 abort ();
690 }
691 }
692
693 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
694 Return the register number otherwise. *ISREG32 is set to one if the
695 register is 32-bit wide; *ISREGZERO is set to one if the register is
696 of type Z_32 or Z_64.
697 Note that this function does not issue any diagnostics. */
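/* For example, "wzr" parses to register number 31 with *ISREG32 = 1 and
   *ISREGZERO = 1; "sp" parses to 31 with both flags 0, or to PARSE_FAIL
   if REJECT_SP is set.  */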
698
699 static int
700 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
701 int *isreg32, int *isregzero)
702 {
703 char *str = *ccp;
704 const reg_entry *reg = parse_reg (&str);
705
706 if (reg == NULL)
707 return PARSE_FAIL;
708
709 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
710 return PARSE_FAIL;
711
712 switch (reg->type)
713 {
714 case REG_TYPE_SP_32:
715 case REG_TYPE_SP_64:
716 if (reject_sp)
717 return PARSE_FAIL;
718 *isreg32 = reg->type == REG_TYPE_SP_32;
719 *isregzero = 0;
720 break;
721 case REG_TYPE_R_32:
722 case REG_TYPE_R_64:
723 *isreg32 = reg->type == REG_TYPE_R_32;
724 *isregzero = 0;
725 break;
726 case REG_TYPE_Z_32:
727 case REG_TYPE_Z_64:
728 if (reject_rz)
729 return PARSE_FAIL;
730 *isreg32 = reg->type == REG_TYPE_Z_32;
731 *isregzero = 1;
732 break;
733 default:
734 return PARSE_FAIL;
735 }
736
737 *ccp = str;
738
739 return reg->number;
740 }
741
742 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
743 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
744 otherwise return FALSE.
745
746 Accept only one occurrence of:
747 8b 16b 2h 4h 8h 2s 4s 1d 2d
748 b h s d q */
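/* For example, ".4s" is parsed as type NT_s, width 4 (4 x 32-bit elements,
   a 128-bit vector) and ".2h" as NT_h, width 2; a bare ".d" leaves the
   width as 0, which parse_typed_reg below treats as requiring an element
   index (except inside register lists).  */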
749 static bfd_boolean
750 parse_vector_type_for_operand (struct vector_type_el *parsed_type, char **str)
751 {
752 char *ptr = *str;
753 unsigned width;
754 unsigned element_size;
755 enum vector_el_type type;
756
757 /* skip '.' */
758 ptr++;
759
760 if (!ISDIGIT (*ptr))
761 {
762 width = 0;
763 goto elt_size;
764 }
765 width = strtoul (ptr, &ptr, 10);
766 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
767 {
768 first_error_fmt (_("bad size %d in vector width specifier"), width);
769 return FALSE;
770 }
771
772 elt_size:
773 switch (TOLOWER (*ptr))
774 {
775 case 'b':
776 type = NT_b;
777 element_size = 8;
778 break;
779 case 'h':
780 type = NT_h;
781 element_size = 16;
782 break;
783 case 's':
784 type = NT_s;
785 element_size = 32;
786 break;
787 case 'd':
788 type = NT_d;
789 element_size = 64;
790 break;
791 case 'q':
792 if (width == 1)
793 {
794 type = NT_q;
795 element_size = 128;
796 break;
797 }
798 /* fall through. */
799 default:
800 if (*ptr != '\0')
801 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
802 else
803 first_error (_("missing element size"));
804 return FALSE;
805 }
806 if (width != 0 && width * element_size != 64 && width * element_size != 128
807 && !(width == 2 && element_size == 16))
808 {
809 first_error_fmt (_
810 ("invalid element size %d and vector size combination %c"),
811 width, *ptr);
812 return FALSE;
813 }
814 ptr++;
815
816 parsed_type->type = type;
817 parsed_type->width = width;
818
819 *str = ptr;
820
821 return TRUE;
822 }
823
824 /* Parse a register of the type TYPE.
825
826 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
827 name or the parsed register is not of TYPE.
828
829 Otherwise return the register number, and optionally fill in the actual
830 type of the register in *RTYPE when multiple alternatives were given, and
831 return the register shape and element index information in *TYPEINFO.
832
833 IN_REG_LIST should be set with TRUE if the caller is parsing a register
834 list. */
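/* For example, given "v2.4s[1]" this returns 2 with *TYPEINFO describing a
   .4s shape plus index 1 (NTA_HASTYPE | NTA_HASINDEX); given "x3" with TYPE
   REG_TYPE_R_Z_SP it returns 3 with *TYPEINFO left undefined (defined == 0).  */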
835
836 static int
837 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
838 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
839 {
840 char *str = *ccp;
841 const reg_entry *reg = parse_reg (&str);
842 struct vector_type_el atype;
843 struct vector_type_el parsetype;
844 bfd_boolean is_typed_vecreg = FALSE;
845
846 atype.defined = 0;
847 atype.type = NT_invtype;
848 atype.width = -1;
849 atype.index = 0;
850
851 if (reg == NULL)
852 {
853 if (typeinfo)
854 *typeinfo = atype;
855 set_default_error ();
856 return PARSE_FAIL;
857 }
858
859 if (! aarch64_check_reg_type (reg, type))
860 {
861 DEBUG_TRACE ("reg type check failed");
862 set_default_error ();
863 return PARSE_FAIL;
864 }
865 type = reg->type;
866
867 if (type == REG_TYPE_VN && *str == '.')
868 {
869 if (!parse_vector_type_for_operand (&parsetype, &str))
870 return PARSE_FAIL;
871
872 /* Register is of the form Vn.[bhsdq]. */
873 is_typed_vecreg = TRUE;
874
875 if (parsetype.width == 0)
876 /* Expect index. In the new scheme we cannot have
877 Vn.[bhsdq] represent a scalar. Therefore any
878 Vn.[bhsdq] should have an index following it.
879 Except in register lists, of course. */
880 atype.defined |= NTA_HASINDEX;
881 else
882 atype.defined |= NTA_HASTYPE;
883
884 atype.type = parsetype.type;
885 atype.width = parsetype.width;
886 }
887
888 if (skip_past_char (&str, '['))
889 {
890 expressionS exp;
891
892 /* Reject Sn[index] syntax. */
893 if (!is_typed_vecreg)
894 {
895 first_error (_("this type of register can't be indexed"));
896 return PARSE_FAIL;
897 }
898
899 if (in_reg_list == TRUE)
900 {
901 first_error (_("index not allowed inside register list"));
902 return PARSE_FAIL;
903 }
904
905 atype.defined |= NTA_HASINDEX;
906
907 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
908
909 if (exp.X_op != O_constant)
910 {
911 first_error (_("constant expression required"));
912 return PARSE_FAIL;
913 }
914
915 if (! skip_past_char (&str, ']'))
916 return PARSE_FAIL;
917
918 atype.index = exp.X_add_number;
919 }
920 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
921 {
922 /* Indexed vector register expected. */
923 first_error (_("indexed vector register expected"));
924 return PARSE_FAIL;
925 }
926
927 /* A vector reg Vn should be typed or indexed. */
928 if (type == REG_TYPE_VN && atype.defined == 0)
929 {
930 first_error (_("invalid use of vector register"));
931 }
932
933 if (typeinfo)
934 *typeinfo = atype;
935
936 if (rtype)
937 *rtype = type;
938
939 *ccp = str;
940
941 return reg->number;
942 }
943
944 /* Parse register.
945
946 Return the register number on success; return PARSE_FAIL otherwise.
947
948 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
949 the register (e.g. NEON double or quad reg when either has been requested).
950
951 If this is a NEON vector register with additional type information, fill
952 in the struct pointed to by VECTYPE (if non-NULL).
953
954 This parser does not handle register lists. */
955
956 static int
957 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
958 aarch64_reg_type *rtype, struct vector_type_el *vectype)
959 {
960 struct vector_type_el atype;
961 char *str = *ccp;
962 int reg = parse_typed_reg (&str, type, rtype, &atype,
963 /*in_reg_list= */ FALSE);
964
965 if (reg == PARSE_FAIL)
966 return PARSE_FAIL;
967
968 if (vectype)
969 *vectype = atype;
970
971 *ccp = str;
972
973 return reg;
974 }
975
976 static inline bfd_boolean
977 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
978 {
979 return
980 e1.type == e2.type
981 && e1.defined == e2.defined
982 && e1.width == e2.width && e1.index == e2.index;
983 }
984
985 /* This function parses a list of vector registers of type TYPE.
986 On success, it returns the parsed register list information in the
987 following encoded format:
988
989 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
990 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg - 1
991
992 The information of the register shape and/or index is returned in
993 *VECTYPE.
994
995 It returns PARSE_FAIL if the register list is invalid.
996
997 The list contains one to four registers.
998 Each register can be one of:
999 <Vt>.<T>[<index>]
1000 <Vt>.<T>
1001 All <T> should be identical.
1002 All <index> should be identical.
1003 There are restrictions on <Vt> numbers which are checked later
1004 (by reg_list_valid_p). */
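/* For example, "{v2.4s - v5.4s}" yields nb_regs = 4 and the encoded value
   (5 << 17) | (4 << 12) | (3 << 7) | (2 << 2) | 3, with *VECTYPE describing
   the .4s shape shared by all four registers.  */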
1005
1006 static int
1007 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1008 struct vector_type_el *vectype)
1009 {
1010 char *str = *ccp;
1011 int nb_regs;
1012 struct vector_type_el typeinfo, typeinfo_first;
1013 int val, val_range;
1014 int in_range;
1015 int ret_val;
1016 int i;
1017 bfd_boolean error = FALSE;
1018 bfd_boolean expect_index = FALSE;
1019
1020 if (*str != '{')
1021 {
1022 set_syntax_error (_("expecting {"));
1023 return PARSE_FAIL;
1024 }
1025 str++;
1026
1027 nb_regs = 0;
1028 typeinfo_first.defined = 0;
1029 typeinfo_first.type = NT_invtype;
1030 typeinfo_first.width = -1;
1031 typeinfo_first.index = 0;
1032 ret_val = 0;
1033 val = -1;
1034 val_range = -1;
1035 in_range = 0;
1036 do
1037 {
1038 if (in_range)
1039 {
1040 str++; /* skip over '-' */
1041 val_range = val;
1042 }
1043 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1044 /*in_reg_list= */ TRUE);
1045 if (val == PARSE_FAIL)
1046 {
1047 set_first_syntax_error (_("invalid vector register in list"));
1048 error = TRUE;
1049 continue;
1050 }
1051 /* reject [bhsd]n */
1052 if (typeinfo.defined == 0)
1053 {
1054 set_first_syntax_error (_("invalid scalar register in list"));
1055 error = TRUE;
1056 continue;
1057 }
1058
1059 if (typeinfo.defined & NTA_HASINDEX)
1060 expect_index = TRUE;
1061
1062 if (in_range)
1063 {
1064 if (val < val_range)
1065 {
1066 set_first_syntax_error
1067 (_("invalid range in vector register list"));
1068 error = TRUE;
1069 }
1070 val_range++;
1071 }
1072 else
1073 {
1074 val_range = val;
1075 if (nb_regs == 0)
1076 typeinfo_first = typeinfo;
1077 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1078 {
1079 set_first_syntax_error
1080 (_("type mismatch in vector register list"));
1081 error = TRUE;
1082 }
1083 }
1084 if (! error)
1085 for (i = val_range; i <= val; i++)
1086 {
1087 ret_val |= i << (5 * nb_regs);
1088 nb_regs++;
1089 }
1090 in_range = 0;
1091 }
1092 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1093
1094 skip_whitespace (str);
1095 if (*str != '}')
1096 {
1097 set_first_syntax_error (_("end of vector register list not found"));
1098 error = TRUE;
1099 }
1100 str++;
1101
1102 skip_whitespace (str);
1103
1104 if (expect_index)
1105 {
1106 if (skip_past_char (&str, '['))
1107 {
1108 expressionS exp;
1109
1110 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1111 if (exp.X_op != O_constant)
1112 {
1113 set_first_syntax_error (_("constant expression required."));
1114 error = TRUE;
1115 }
1116 if (! skip_past_char (&str, ']'))
1117 error = TRUE;
1118 else
1119 typeinfo_first.index = exp.X_add_number;
1120 }
1121 else
1122 {
1123 set_first_syntax_error (_("expected index"));
1124 error = TRUE;
1125 }
1126 }
1127
1128 if (nb_regs > 4)
1129 {
1130 set_first_syntax_error (_("too many registers in vector register list"));
1131 error = TRUE;
1132 }
1133 else if (nb_regs == 0)
1134 {
1135 set_first_syntax_error (_("empty vector register list"));
1136 error = TRUE;
1137 }
1138
1139 *ccp = str;
1140 if (! error)
1141 *vectype = typeinfo_first;
1142
1143 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1144 }
1145
1146 /* Directives: register aliases. */
1147
1148 static reg_entry *
1149 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1150 {
1151 reg_entry *new;
1152 const char *name;
1153
1154 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1155 {
1156 if (new->builtin)
1157 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1158 str);
1159
1160 /* Only warn about a redefinition if it's not defined as the
1161 same register. */
1162 else if (new->number != number || new->type != type)
1163 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1164
1165 return NULL;
1166 }
1167
1168 name = xstrdup (str);
1169 new = XNEW (reg_entry);
1170
1171 new->name = name;
1172 new->number = number;
1173 new->type = type;
1174 new->builtin = FALSE;
1175
1176 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1177 abort ();
1178
1179 return new;
1180 }
1181
1182 /* Look for the .req directive. This is of the form:
1183
1184 new_register_name .req existing_register_name
1185
1186 If we find one, or if it looks sufficiently like one that we want to
1187 handle any error here, return TRUE. Otherwise return FALSE. */
1188
1189 static bfd_boolean
1190 create_register_alias (char *newname, char *p)
1191 {
1192 const reg_entry *old;
1193 char *oldname, *nbuf;
1194 size_t nlen;
1195
1196 /* The input scrubber ensures that whitespace after the mnemonic is
1197 collapsed to single spaces. */
1198 oldname = p;
1199 if (strncmp (oldname, " .req ", 6) != 0)
1200 return FALSE;
1201
1202 oldname += 6;
1203 if (*oldname == '\0')
1204 return FALSE;
1205
1206 old = hash_find (aarch64_reg_hsh, oldname);
1207 if (!old)
1208 {
1209 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1210 return TRUE;
1211 }
1212
1213 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1214 the desired alias name, and p points to its end. If not, then
1215 the desired alias name is in the global original_case_string. */
1216 #ifdef TC_CASE_SENSITIVE
1217 nlen = p - newname;
1218 #else
1219 newname = original_case_string;
1220 nlen = strlen (newname);
1221 #endif
1222
1223 nbuf = xmemdup0 (newname, nlen);
1224
1225 /* Create aliases under the new name as stated; an all-lowercase
1226 version of the new name; and an all-uppercase version of the new
1227 name. */
1228 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1229 {
1230 for (p = nbuf; *p; p++)
1231 *p = TOUPPER (*p);
1232
1233 if (strncmp (nbuf, newname, nlen))
1234 {
1235 /* If this attempt to create an additional alias fails, do not bother
1236 trying to create the all-lower case alias. We will fail and issue
1237 a second, duplicate error message. This situation arises when the
1238 programmer does something like:
1239 foo .req x0
1240 Foo .req x1
1241 The second .req creates the "Foo" alias but then fails to create
1242 the artificial FOO alias because it has already been created by the
1243 first .req. */
1244 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1245 {
1246 free (nbuf);
1247 return TRUE;
1248 }
1249 }
1250
1251 for (p = nbuf; *p; p++)
1252 *p = TOLOWER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 insert_reg_alias (nbuf, old->number, old->type);
1256 }
1257
1258 free (nbuf);
1259 return TRUE;
1260 }
1261
1262 /* Should never be called, as .req goes between the alias and the
1263 register name, not at the beginning of the line. */
1264 static void
1265 s_req (int a ATTRIBUTE_UNUSED)
1266 {
1267 as_bad (_("invalid syntax for .req directive"));
1268 }
1269
1270 /* The .unreq directive deletes an alias which was previously defined
1271 by .req. For example:
1272
1273 my_alias .req x11
1274 .unreq my_alias */
1275
1276 static void
1277 s_unreq (int a ATTRIBUTE_UNUSED)
1278 {
1279 char *name;
1280 char saved_char;
1281
1282 name = input_line_pointer;
1283
1284 while (*input_line_pointer != 0
1285 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1286 ++input_line_pointer;
1287
1288 saved_char = *input_line_pointer;
1289 *input_line_pointer = 0;
1290
1291 if (!*name)
1292 as_bad (_("invalid syntax for .unreq directive"));
1293 else
1294 {
1295 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1296
1297 if (!reg)
1298 as_bad (_("unknown register alias '%s'"), name);
1299 else if (reg->builtin)
1300 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1301 name);
1302 else
1303 {
1304 char *p;
1305 char *nbuf;
1306
1307 hash_delete (aarch64_reg_hsh, name, FALSE);
1308 free ((char *) reg->name);
1309 free (reg);
1310
1311 /* Also locate the all upper case and all lower case versions.
1312 Do not complain if we cannot find one or the other as it
1313 was probably deleted above. */
1314
1315 nbuf = strdup (name);
1316 for (p = nbuf; *p; p++)
1317 *p = TOUPPER (*p);
1318 reg = hash_find (aarch64_reg_hsh, nbuf);
1319 if (reg)
1320 {
1321 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1322 free ((char *) reg->name);
1323 free (reg);
1324 }
1325
1326 for (p = nbuf; *p; p++)
1327 *p = TOLOWER (*p);
1328 reg = hash_find (aarch64_reg_hsh, nbuf);
1329 if (reg)
1330 {
1331 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1332 free ((char *) reg->name);
1333 free (reg);
1334 }
1335
1336 free (nbuf);
1337 }
1338 }
1339
1340 *input_line_pointer = saved_char;
1341 demand_empty_rest_of_line ();
1342 }
1343
1344 /* Directives: Instruction set selection. */
1345
1346 #ifdef OBJ_ELF
1347 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1348 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1349 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1350 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1351
1352 /* Create a new mapping symbol for the transition to STATE. */
1353
1354 static void
1355 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1356 {
1357 symbolS *symbolP;
1358 const char *symname;
1359 int type;
1360
1361 switch (state)
1362 {
1363 case MAP_DATA:
1364 symname = "$d";
1365 type = BSF_NO_FLAGS;
1366 break;
1367 case MAP_INSN:
1368 symname = "$x";
1369 type = BSF_NO_FLAGS;
1370 break;
1371 default:
1372 abort ();
1373 }
1374
1375 symbolP = symbol_new (symname, now_seg, value, frag);
1376 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1377
1378 /* Save the mapping symbols for future reference. Also check that
1379 we do not place two mapping symbols at the same offset within a
1380 frag. We'll handle overlap between frags in
1381 check_mapping_symbols.
1382
1383 If .fill or other data filling directive generates zero sized data,
1384 the mapping symbol for the following code will have the same value
1385 as the one generated for the data filling directive. In this case,
1386 we replace the old symbol with the new one at the same address. */
1387 if (value == 0)
1388 {
1389 if (frag->tc_frag_data.first_map != NULL)
1390 {
1391 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1392 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1393 &symbol_lastP);
1394 }
1395 frag->tc_frag_data.first_map = symbolP;
1396 }
1397 if (frag->tc_frag_data.last_map != NULL)
1398 {
1399 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1400 S_GET_VALUE (symbolP));
1401 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1402 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1403 &symbol_lastP);
1404 }
1405 frag->tc_frag_data.last_map = symbolP;
1406 }
1407
1408 /* We must sometimes convert a region marked as code to data during
1409 code alignment, if an odd number of bytes have to be padded. The
1410 code mapping symbol is pushed to an aligned address. */
1411
1412 static void
1413 insert_data_mapping_symbol (enum mstate state,
1414 valueT value, fragS * frag, offsetT bytes)
1415 {
1416 /* If there was already a mapping symbol, remove it. */
1417 if (frag->tc_frag_data.last_map != NULL
1418 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1419 frag->fr_address + value)
1420 {
1421 symbolS *symp = frag->tc_frag_data.last_map;
1422
1423 if (value == 0)
1424 {
1425 know (frag->tc_frag_data.first_map == symp);
1426 frag->tc_frag_data.first_map = NULL;
1427 }
1428 frag->tc_frag_data.last_map = NULL;
1429 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1430 }
1431
1432 make_mapping_symbol (MAP_DATA, value, frag);
1433 make_mapping_symbol (state, value + bytes, frag);
1434 }
1435
1436 static void mapping_state_2 (enum mstate state, int max_chars);
1437
1438 /* Set the mapping state to STATE. Only call this when about to
1439 emit some STATE bytes to the file. */
1440
1441 void
1442 mapping_state (enum mstate state)
1443 {
1444 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1445
1446 if (state == MAP_INSN)
1447 /* AArch64 instructions require 4-byte alignment. When emitting
1448 instructions into any section, record the appropriate section
1449 alignment. */
1450 record_alignment (now_seg, 2);
1451
1452 if (mapstate == state)
1453 /* The mapping symbol has already been emitted.
1454 There is nothing else to do. */
1455 return;
1456
1457 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1458 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1459 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1460 evaluated later in the next else. */
1461 return;
1462 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1463 {
1464 /* Only add the symbol if the offset is > 0:
1465 if we're at the first frag, check its size > 0;
1466 if we're not at the first frag, then for sure
1467 the offset is > 0. */
1468 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1469 const int add_symbol = (frag_now != frag_first)
1470 || (frag_now_fix () > 0);
1471
1472 if (add_symbol)
1473 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1474 }
1475 #undef TRANSITION
1476
1477 mapping_state_2 (state, 0);
1478 }
1479
1480 /* Same as mapping_state, but MAX_CHARS bytes have already been
1481 allocated. Put the mapping symbol that far back. */
1482
1483 static void
1484 mapping_state_2 (enum mstate state, int max_chars)
1485 {
1486 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1487
1488 if (!SEG_NORMAL (now_seg))
1489 return;
1490
1491 if (mapstate == state)
1492 /* The mapping symbol has already been emitted.
1493 There is nothing else to do. */
1494 return;
1495
1496 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1497 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1498 }
1499 #else
1500 #define mapping_state(x) /* nothing */
1501 #define mapping_state_2(x, y) /* nothing */
1502 #endif
1503
1504 /* Directives: sectioning and alignment. */
1505
1506 static void
1507 s_bss (int ignore ATTRIBUTE_UNUSED)
1508 {
1509 /* We don't support putting frags in the BSS segment; we fake it by
1510 marking in_bss, then looking at s_skip for clues. */
1511 subseg_set (bss_section, 0);
1512 demand_empty_rest_of_line ();
1513 mapping_state (MAP_DATA);
1514 }
1515
1516 static void
1517 s_even (int ignore ATTRIBUTE_UNUSED)
1518 {
1519 /* Never make frag if expect extra pass. */
1520 if (!need_pass_2)
1521 frag_align (1, 0, 0);
1522
1523 record_alignment (now_seg, 1);
1524
1525 demand_empty_rest_of_line ();
1526 }
1527
1528 /* Directives: Literal pools. */
1529
1530 static literal_pool *
1531 find_literal_pool (int size)
1532 {
1533 literal_pool *pool;
1534
1535 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1536 {
1537 if (pool->section == now_seg
1538 && pool->sub_section == now_subseg && pool->size == size)
1539 break;
1540 }
1541
1542 return pool;
1543 }
1544
1545 static literal_pool *
1546 find_or_make_literal_pool (int size)
1547 {
1548 /* Next literal pool ID number. */
1549 static unsigned int latest_pool_num = 1;
1550 literal_pool *pool;
1551
1552 pool = find_literal_pool (size);
1553
1554 if (pool == NULL)
1555 {
1556 /* Create a new pool. */
1557 pool = XNEW (literal_pool);
1558 if (!pool)
1559 return NULL;
1560
1561 /* Currently we always put the literal pool in the current text
1562 section. If we were generating "small" model code where we
1563 knew that all code and initialised data was within 1MB then
1564 we could output literals to mergeable, read-only data
1565 sections. */
1566
1567 pool->next_free_entry = 0;
1568 pool->section = now_seg;
1569 pool->sub_section = now_subseg;
1570 pool->size = size;
1571 pool->next = list_of_pools;
1572 pool->symbol = NULL;
1573
1574 /* Add it to the list. */
1575 list_of_pools = pool;
1576 }
1577
1578 /* New pools, and emptied pools, will have a NULL symbol. */
1579 if (pool->symbol == NULL)
1580 {
1581 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1582 (valueT) 0, &zero_address_frag);
1583 pool->id = latest_pool_num++;
1584 }
1585
1586 /* Done. */
1587 return pool;
1588 }
1589
1590 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1591 Return TRUE on success, otherwise return FALSE. */
1592 static bfd_boolean
1593 add_to_lit_pool (expressionS *exp, int size)
1594 {
1595 literal_pool *pool;
1596 unsigned int entry;
1597
1598 pool = find_or_make_literal_pool (size);
1599
1600 /* Check if this literal value is already in the pool. */
1601 for (entry = 0; entry < pool->next_free_entry; entry++)
1602 {
1603 expressionS * litexp = & pool->literals[entry].exp;
1604
1605 if ((litexp->X_op == exp->X_op)
1606 && (exp->X_op == O_constant)
1607 && (litexp->X_add_number == exp->X_add_number)
1608 && (litexp->X_unsigned == exp->X_unsigned))
1609 break;
1610
1611 if ((litexp->X_op == exp->X_op)
1612 && (exp->X_op == O_symbol)
1613 && (litexp->X_add_number == exp->X_add_number)
1614 && (litexp->X_add_symbol == exp->X_add_symbol)
1615 && (litexp->X_op_symbol == exp->X_op_symbol))
1616 break;
1617 }
1618
1619 /* Do we need to create a new entry? */
1620 if (entry == pool->next_free_entry)
1621 {
1622 if (entry >= MAX_LITERAL_POOL_SIZE)
1623 {
1624 set_syntax_error (_("literal pool overflow"));
1625 return FALSE;
1626 }
1627
1628 pool->literals[entry].exp = *exp;
1629 pool->next_free_entry += 1;
1630 if (exp->X_op == O_big)
1631 {
1632 /* PR 16688: Bignums are held in a single global array. We must
1633 copy and preserve that value now, before it is overwritten. */
1634 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1635 exp->X_add_number);
1636 memcpy (pool->literals[entry].bignum, generic_bignum,
1637 CHARS_PER_LITTLENUM * exp->X_add_number);
1638 }
1639 else
1640 pool->literals[entry].bignum = NULL;
1641 }
1642
1643 exp->X_op = O_symbol;
1644 exp->X_add_number = ((int) entry) * size;
1645 exp->X_add_symbol = pool->symbol;
1646
1647 return TRUE;
1648 }
1649
1650 /* Can't use symbol_new here, so have to create a symbol and then at
1651 a later date assign it a value. That's what these functions do. */
1652
1653 static void
1654 symbol_locate (symbolS * symbolP,
1655 const char *name,/* It is copied, the caller can modify. */
1656 segT segment, /* Segment identifier (SEG_<something>). */
1657 valueT valu, /* Symbol value. */
1658 fragS * frag) /* Associated fragment. */
1659 {
1660 size_t name_length;
1661 char *preserved_copy_of_name;
1662
1663 name_length = strlen (name) + 1; /* +1 for \0. */
1664 obstack_grow (&notes, name, name_length);
1665 preserved_copy_of_name = obstack_finish (&notes);
1666
1667 #ifdef tc_canonicalize_symbol_name
1668 preserved_copy_of_name =
1669 tc_canonicalize_symbol_name (preserved_copy_of_name);
1670 #endif
1671
1672 S_SET_NAME (symbolP, preserved_copy_of_name);
1673
1674 S_SET_SEGMENT (symbolP, segment);
1675 S_SET_VALUE (symbolP, valu);
1676 symbol_clear_list_pointers (symbolP);
1677
1678 symbol_set_frag (symbolP, frag);
1679
1680 /* Link to end of symbol chain. */
1681 {
1682 extern int symbol_table_frozen;
1683
1684 if (symbol_table_frozen)
1685 abort ();
1686 }
1687
1688 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1689
1690 obj_symbol_new_hook (symbolP);
1691
1692 #ifdef tc_symbol_new_hook
1693 tc_symbol_new_hook (symbolP);
1694 #endif
1695
1696 #ifdef DEBUG_SYMS
1697 verify_symbol_chain (symbol_rootP, symbol_lastP);
1698 #endif /* DEBUG_SYMS */
1699 }
1700
1701
1702 static void
1703 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1704 {
1705 unsigned int entry;
1706 literal_pool *pool;
1707 char sym_name[20];
1708 int align;
1709
1710 for (align = 2; align <= 4; align++)
1711 {
1712 int size = 1 << align;
1713
1714 pool = find_literal_pool (size);
1715 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1716 continue;
1717
1718 /* Align the pool, as its entries will be accessed as words.
1719 Only make a frag if we have to. */
1720 if (!need_pass_2)
1721 frag_align (align, 0, 0);
1722
1723 mapping_state (MAP_DATA);
1724
1725 record_alignment (now_seg, align);
1726
1727 sprintf (sym_name, "$$lit_\002%x", pool->id);
1728
1729 symbol_locate (pool->symbol, sym_name, now_seg,
1730 (valueT) frag_now_fix (), frag_now);
1731 symbol_table_insert (pool->symbol);
1732
1733 for (entry = 0; entry < pool->next_free_entry; entry++)
1734 {
1735 expressionS * exp = & pool->literals[entry].exp;
1736
1737 if (exp->X_op == O_big)
1738 {
1739 /* PR 16688: Restore the global bignum value. */
1740 gas_assert (pool->literals[entry].bignum != NULL);
1741 memcpy (generic_bignum, pool->literals[entry].bignum,
1742 CHARS_PER_LITTLENUM * exp->X_add_number);
1743 }
1744
1745 /* First output the expression in the instruction to the pool. */
1746 emit_expr (exp, size); /* .word|.xword */
1747
1748 if (exp->X_op == O_big)
1749 {
1750 free (pool->literals[entry].bignum);
1751 pool->literals[entry].bignum = NULL;
1752 }
1753 }
1754
1755 /* Mark the pool as empty. */
1756 pool->next_free_entry = 0;
1757 pool->symbol = NULL;
1758 }
1759 }
1760
1761 #ifdef OBJ_ELF
1762 /* Forward declarations for functions below, in the MD interface
1763 section. */
1764 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1765 static struct reloc_table_entry * find_reloc_table_entry (char **);
1766
1767 /* Directives: Data. */
1768 /* N.B. the support for relocation suffix in this directive needs to be
1769 implemented properly. */
1770
1771 static void
1772 s_aarch64_elf_cons (int nbytes)
1773 {
1774 expressionS exp;
1775
1776 #ifdef md_flush_pending_output
1777 md_flush_pending_output ();
1778 #endif
1779
1780 if (is_it_end_of_statement ())
1781 {
1782 demand_empty_rest_of_line ();
1783 return;
1784 }
1785
1786 #ifdef md_cons_align
1787 md_cons_align (nbytes);
1788 #endif
1789
1790 mapping_state (MAP_DATA);
1791 do
1792 {
1793 struct reloc_table_entry *reloc;
1794
1795 expression (&exp);
1796
1797 if (exp.X_op != O_symbol)
1798 emit_expr (&exp, (unsigned int) nbytes);
1799 else
1800 {
1801 skip_past_char (&input_line_pointer, '#');
1802 if (skip_past_char (&input_line_pointer, ':'))
1803 {
1804 reloc = find_reloc_table_entry (&input_line_pointer);
1805 if (reloc == NULL)
1806 as_bad (_("unrecognized relocation suffix"));
1807 else
1808 as_bad (_("unimplemented relocation suffix"));
1809 ignore_rest_of_line ();
1810 return;
1811 }
1812 else
1813 emit_expr (&exp, (unsigned int) nbytes);
1814 }
1815 }
1816 while (*input_line_pointer++ == ',');
1817
1818 /* Put terminator back into stream. */
1819 input_line_pointer--;
1820 demand_empty_rest_of_line ();
1821 }
1822
1823 #endif /* OBJ_ELF */
1824
1825 /* Output a 32-bit word, but mark as an instruction. */
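/* For example, ".inst 0xd503201f" emits the 4-byte NOP encoding into the
   current section and, for ELF, records the MAP_INSN ($x) mapping state so
   the bytes are treated as code rather than data.  */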
1826
1827 static void
1828 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1829 {
1830 expressionS exp;
1831
1832 #ifdef md_flush_pending_output
1833 md_flush_pending_output ();
1834 #endif
1835
1836 if (is_it_end_of_statement ())
1837 {
1838 demand_empty_rest_of_line ();
1839 return;
1840 }
1841
1842 /* Sections are assumed to start aligned. In an executable section, there is no
1843 MAP_DATA symbol pending. So we only align the address during
1844 MAP_DATA --> MAP_INSN transition.
1845 For other sections, this is not guaranteed. */
1846 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1847 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1848 frag_align_code (2, 0);
1849
1850 #ifdef OBJ_ELF
1851 mapping_state (MAP_INSN);
1852 #endif
1853
1854 do
1855 {
1856 expression (&exp);
1857 if (exp.X_op != O_constant)
1858 {
1859 as_bad (_("constant expression required"));
1860 ignore_rest_of_line ();
1861 return;
1862 }
1863
1864 if (target_big_endian)
1865 {
1866 unsigned int val = exp.X_add_number;
1867 exp.X_add_number = SWAP_32 (val);
1868 }
1869 emit_expr (&exp, 4);
1870 }
1871 while (*input_line_pointer++ == ',');
1872
1873 /* Put terminator back into stream. */
1874 input_line_pointer--;
1875 demand_empty_rest_of_line ();
1876 }
1877
1878 #ifdef OBJ_ELF
1879 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1880
1881 static void
1882 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1883 {
1884 expressionS exp;
1885
1886 expression (&exp);
1887 frag_grow (4);
1888 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1889 BFD_RELOC_AARCH64_TLSDESC_ADD);
1890
1891 demand_empty_rest_of_line ();
1892 }
1893
1894 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1895
1896 static void
1897 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1898 {
1899 expressionS exp;
1900
1901 /* Since we're just labelling the code, there's no need to define a
1902 mapping symbol. */
1903 expression (&exp);
1904 /* Make sure there is enough room in this frag for the following
1905 blr. This trick only works if the blr follows immediately after
1906 the .tlsdesccall directive. */
1907 frag_grow (4);
1908 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1909 BFD_RELOC_AARCH64_TLSDESC_CALL);
1910
1911 demand_empty_rest_of_line ();
1912 }
1913
1914 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
1915
1916 static void
1917 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
1918 {
1919 expressionS exp;
1920
1921 expression (&exp);
1922 frag_grow (4);
1923 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1924 BFD_RELOC_AARCH64_TLSDESC_LDR);
1925
1926 demand_empty_rest_of_line ();
1927 }
1928 #endif /* OBJ_ELF */
1929
1930 static void s_aarch64_arch (int);
1931 static void s_aarch64_cpu (int);
1932 static void s_aarch64_arch_extension (int);
1933
1934 /* This table describes all the machine specific pseudo-ops the assembler
1935 has to support. The fields are:
1936 pseudo-op name without dot
1937 function to call to execute this pseudo-op
1938 Integer arg to pass to the function. */
1939
1940 const pseudo_typeS md_pseudo_table[] = {
1941 /* Never called because '.req' does not start a line. */
1942 {"req", s_req, 0},
1943 {"unreq", s_unreq, 0},
1944 {"bss", s_bss, 0},
1945 {"even", s_even, 0},
1946 {"ltorg", s_ltorg, 0},
1947 {"pool", s_ltorg, 0},
1948 {"cpu", s_aarch64_cpu, 0},
1949 {"arch", s_aarch64_arch, 0},
1950 {"arch_extension", s_aarch64_arch_extension, 0},
1951 {"inst", s_aarch64_inst, 0},
1952 #ifdef OBJ_ELF
1953 {"tlsdescadd", s_tlsdescadd, 0},
1954 {"tlsdesccall", s_tlsdesccall, 0},
1955 {"tlsdescldr", s_tlsdescldr, 0},
1956 {"word", s_aarch64_elf_cons, 4},
1957 {"long", s_aarch64_elf_cons, 4},
1958 {"xword", s_aarch64_elf_cons, 8},
1959 {"dword", s_aarch64_elf_cons, 8},
1960 #endif
1961 {0, 0, 0}
1962 };
1963 \f
1964
1965 /* Check whether STR points to a register name followed by a comma or the
1966 end of line; REG_TYPE indicates which register types are checked
1967 against. Return TRUE if STR is such a register name; otherwise return
1968 FALSE. The function does not intend to produce any diagnostics, but since
1969 the register parser aarch64_reg_parse, which is called by this function,
1970 does produce diagnostics, we call clear_error to clear any diagnostics
1971 that may be generated by aarch64_reg_parse.
1972 Also, the function returns FALSE directly if there is any user error
1973 present at the function entry. This prevents the existing diagnostics
1974 state from being spoiled.
1975 The function currently serves parse_constant_immediate and
1976 parse_big_immediate only. */
1977 static bfd_boolean
1978 reg_name_p (char *str, aarch64_reg_type reg_type)
1979 {
1980 int reg;
1981
1982 /* Prevent the diagnostics state from being spoiled. */
1983 if (error_p ())
1984 return FALSE;
1985
1986 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1987
1988 /* Clear the parsing error that may be set by the reg parser. */
1989 clear_error ();
1990
1991 if (reg == PARSE_FAIL)
1992 return FALSE;
1993
1994 skip_whitespace (str);
1995 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1996 return TRUE;
1997
1998 return FALSE;
1999 }
2000
2001 /* Parser functions used exclusively in instruction operands. */
2002
2003 /* Parse an immediate expression which may not be constant.
2004
2005 To prevent the expression parser from pushing a register name
2006 into the symbol table as an undefined symbol, firstly a check is
2007 done to find out whether STR is a register of type REG_TYPE followed
2008 by a comma or the end of line. Return FALSE if STR is such a string. */
2009
2010 static bfd_boolean
2011 parse_immediate_expression (char **str, expressionS *exp,
2012 aarch64_reg_type reg_type)
2013 {
2014 if (reg_name_p (*str, reg_type))
2015 {
2016 set_recoverable_error (_("immediate operand required"));
2017 return FALSE;
2018 }
2019
2020 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2021
2022 if (exp->X_op == O_absent)
2023 {
2024 set_fatal_syntax_error (_("missing immediate expression"));
2025 return FALSE;
2026 }
2027
2028 return TRUE;
2029 }
2030
2031 /* Constant immediate-value read function for use in insn parsing.
2032 STR points to the beginning of the immediate (with the optional
2033 leading #); *VAL receives the value. REG_TYPE says which register
2034 names should be treated as registers rather than as symbolic immediates.
2035
2036 Return TRUE on success; otherwise return FALSE. */
2037
2038 static bfd_boolean
2039 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2040 {
2041 expressionS exp;
2042
2043 if (! parse_immediate_expression (str, &exp, reg_type))
2044 return FALSE;
2045
2046 if (exp.X_op != O_constant)
2047 {
2048 set_syntax_error (_("constant expression required"));
2049 return FALSE;
2050 }
2051
2052 *val = exp.X_add_number;
2053 return TRUE;
2054 }
2055
2056 static uint32_t
2057 encode_imm_float_bits (uint32_t imm)
2058 {
2059 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2060 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2061 }
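/* Worked example (for illustration, not part of the original source):
   1.0f has the IEEE754 single-precision encoding 0x3f800000, so the
   shifts above yield imm8 = 0x70, while 2.0f (0x40000000) yields
   imm8 = 0x00.  */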
2062
2063 /* Return TRUE if the single-precision floating-point value encoded in IMM
2064 can be expressed in the AArch64 8-bit signed floating-point format with
2065 3-bit exponent and normalized 4 bits of precision; in other words, the
2066 floating-point value must be expressible as
2067 (+/-) n / 16 * power (2, r)
2068 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2069
2070 static bfd_boolean
2071 aarch64_imm_float_p (uint32_t imm)
2072 {
2073 /* If a single-precision floating-point value has the following bit
2074 pattern, it can be expressed in the AArch64 8-bit floating-point
2075 format:
2076
2077 3 32222222 2221111111111
2078 1 09876543 21098765432109876543210
2079 n Eeeeeexx xxxx0000000000000000000
2080
2081 where n, e and each x are either 0 or 1 independently, with
2082 E == ~ e. */
2083
2084 uint32_t pattern;
2085
2086 /* Prepare the pattern for 'Eeeeee'. */
2087 if (((imm >> 30) & 0x1) == 0)
2088 pattern = 0x3e000000;
2089 else
2090 pattern = 0x40000000;
2091
2092 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2093 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2094 }
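/* Illustrative note (derived from the constraint above, not from the
   original source): the representable magnitudes range from 0.125
   (16/16 * 2^-3) to 31.0 (31/16 * 2^4); for example, 1.0f (0x3f800000)
   passes the check, whereas 0.1f (0x3dcccccd) does not, because its
   lower 19 bits are non-zero.  */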
2095
2096 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2097 as an IEEE float without any loss of precision. Store the value in
2098 *FPWORD if so. */
2099
2100 static bfd_boolean
2101 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2102 {
2103 /* If a double-precision floating-point value has the following bit
2104 pattern, it can be expressed in a float:
2105
2106 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2107 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2108 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2109
2110 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2111 if Eeee_eeee != 1111_1111
2112
2113 where n, e, s and S are either 0 or 1 independently and where ~ is the
2114 inverse of E. */
2115
2116 uint32_t pattern;
2117 uint32_t high32 = imm >> 32;
2118 uint32_t low32 = imm;
2119
2120 /* Lower 29 bits need to be 0s. */
2121 if ((imm & 0x1fffffff) != 0)
2122 return FALSE;
2123
2124 /* Prepare the pattern for 'Eeeeeeeee'. */
2125 if (((high32 >> 30) & 0x1) == 0)
2126 pattern = 0x38000000;
2127 else
2128 pattern = 0x40000000;
2129
2130 /* Check E~~~. */
2131 if ((high32 & 0x78000000) != pattern)
2132 return FALSE;
2133
2134 /* Check Eeee_eeee != 1111_1111. */
2135 if ((high32 & 0x7ff00000) == 0x47f00000)
2136 return FALSE;
2137
2138 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2139 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2140 | (low32 >> 29)); /* 3 S bits. */
2141 return TRUE;
2142 }
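/* Worked example (for illustration only): the double 1.0 is encoded as
   0x3ff0000000000000; the lower 29 bits are zero and the exponent checks
   pass, so *FPWORD becomes 0x3f800000, the single-precision encoding of
   1.0.  */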
2143
2144 /* Parse a floating-point immediate. Return TRUE on success and return the
2145 value in *IMMED in the format of IEEE754 single-precision encoding.
2146 *CCP points to the start of the string; DP_P is TRUE when the immediate
2147 is expected to be in double-precision (N.B. this only matters when
2148 hexadecimal representation is involved). REG_TYPE says which register
2149 names should be treated as registers rather than as symbolic immediates.
2150
2151 N.B. 0.0 is accepted by this function. */
2152
2153 static bfd_boolean
2154 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2155 aarch64_reg_type reg_type)
2156 {
2157 char *str = *ccp;
2158 char *fpnum;
2159 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2160 int found_fpchar = 0;
2161 int64_t val = 0;
2162 unsigned fpword = 0;
2163 bfd_boolean hex_p = FALSE;
2164
2165 skip_past_char (&str, '#');
2166
2167 fpnum = str;
2168 skip_whitespace (fpnum);
2169
2170 if (strncmp (fpnum, "0x", 2) == 0)
2171 {
2172 /* Support the hexadecimal representation of the IEEE754 encoding.
2173 Double-precision is expected when DP_P is TRUE, otherwise the
2174 representation should be in single-precision. */
2175 if (! parse_constant_immediate (&str, &val, reg_type))
2176 goto invalid_fp;
2177
2178 if (dp_p)
2179 {
2180 if (!can_convert_double_to_float (val, &fpword))
2181 goto invalid_fp;
2182 }
2183 else if ((uint64_t) val > 0xffffffff)
2184 goto invalid_fp;
2185 else
2186 fpword = val;
2187
2188 hex_p = TRUE;
2189 }
2190 else
2191 {
2192 if (reg_name_p (str, reg_type))
2193 {
2194 set_recoverable_error (_("immediate operand required"));
2195 return FALSE;
2196 }
2197
2198 /* We must not accidentally parse an integer as a floating-point number.
2199 Make sure that the value we parse is not an integer by checking for
2200 special characters '.' or 'e'. */
2201 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2202 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2203 {
2204 found_fpchar = 1;
2205 break;
2206 }
2207
2208 if (!found_fpchar)
2209 return FALSE;
2210 }
2211
2212 if (! hex_p)
2213 {
2214 int i;
2215
2216 if ((str = atof_ieee (str, 's', words)) == NULL)
2217 goto invalid_fp;
2218
2219 /* Our FP word must be 32 bits (single-precision FP). */
2220 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2221 {
2222 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2223 fpword |= words[i];
2224 }
2225 }
2226
2227 if (aarch64_imm_float_p (fpword) || fpword == 0)
2228 {
2229 *immed = fpword;
2230 *ccp = str;
2231 return TRUE;
2232 }
2233
2234 invalid_fp:
2235 set_fatal_syntax_error (_("invalid floating-point constant"));
2236 return FALSE;
2237 }
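/* Usage note (illustrative, based on the logic above): when DP_P is FALSE,
   the decimal form "#1.0" and the raw-encoding form "#0x3f800000" are both
   accepted by this function and both leave *IMMED equal to 0x3f800000.  */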
2238
2239 /* Less-generic immediate-value read function with the possibility of loading
2240 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2241 instructions.
2242
2243 To prevent the expression parser from pushing a register name into the
2244 symbol table as an undefined symbol, a check is first made to find
2245 out whether STR is a register of type REG_TYPE followed by a comma or
2246 the end of line. Return FALSE if STR is such a register. */
2247
2248 static bfd_boolean
2249 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2250 {
2251 char *ptr = *str;
2252
2253 if (reg_name_p (ptr, reg_type))
2254 {
2255 set_syntax_error (_("immediate operand required"));
2256 return FALSE;
2257 }
2258
2259 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2260
2261 if (inst.reloc.exp.X_op == O_constant)
2262 *imm = inst.reloc.exp.X_add_number;
2263
2264 *str = ptr;
2265
2266 return TRUE;
2267 }
2268
2269 /* Record in *RELOC that operand *OPERAND needs a GAS internal fixup.
2270 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2271 assistance from libopcodes. */
2272
2273 static inline void
2274 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2275 const aarch64_opnd_info *operand,
2276 int need_libopcodes_p)
2277 {
2278 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2279 reloc->opnd = operand->type;
2280 if (need_libopcodes_p)
2281 reloc->need_libopcodes_p = 1;
2282 }
2283
2284 /* Return TRUE if the instruction needs to be fixed up later internally by
2285 the GAS; otherwise return FALSE. */
2286
2287 static inline bfd_boolean
2288 aarch64_gas_internal_fixup_p (void)
2289 {
2290 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2291 }
2292
2293 /* Assign the immediate value to the relevant field in *OPERAND if
2294 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2295 needs an internal fixup in a later stage.
2296 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2297 IMM.VALUE that may get assigned with the constant. */
2298 static inline void
2299 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2300 aarch64_opnd_info *operand,
2301 int addr_off_p,
2302 int need_libopcodes_p,
2303 int skip_p)
2304 {
2305 if (reloc->exp.X_op == O_constant)
2306 {
2307 if (addr_off_p)
2308 operand->addr.offset.imm = reloc->exp.X_add_number;
2309 else
2310 operand->imm.value = reloc->exp.X_add_number;
2311 reloc->type = BFD_RELOC_UNUSED;
2312 }
2313 else
2314 {
2315 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2316 /* Tell libopcodes whether or not to ignore this operand. This is helpful
2317 when one of the operands needs to be fixed up later but we need
2318 libopcodes to check the other operands. */
2319 operand->skip = skip_p;
2320 }
2321 }
2322
2323 /* Relocation modifiers. Each entry in the table contains the textual
2324 name for the relocation which may be placed before a symbol used as
2325 a load/store offset, or add immediate. It must be surrounded by a
2326 leading and trailing colon, for example:
2327
2328 ldr x0, [x1, #:rello:varsym]
2329 add x0, x1, #:rello:varsym */
2330
2331 struct reloc_table_entry
2332 {
2333 const char *name;
2334 int pc_rel;
2335 bfd_reloc_code_real_type adr_type;
2336 bfd_reloc_code_real_type adrp_type;
2337 bfd_reloc_code_real_type movw_type;
2338 bfd_reloc_code_real_type add_type;
2339 bfd_reloc_code_real_type ldst_type;
2340 bfd_reloc_code_real_type ld_literal_type;
2341 };
2342
2343 static struct reloc_table_entry reloc_table[] = {
2344 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2345 {"lo12", 0,
2346 0, /* adr_type */
2347 0,
2348 0,
2349 BFD_RELOC_AARCH64_ADD_LO12,
2350 BFD_RELOC_AARCH64_LDST_LO12,
2351 0},
2352
2353 /* Higher 21 bits of pc-relative page offset: ADRP */
2354 {"pg_hi21", 1,
2355 0, /* adr_type */
2356 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2357 0,
2358 0,
2359 0,
2360 0},
2361
2362 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2363 {"pg_hi21_nc", 1,
2364 0, /* adr_type */
2365 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2366 0,
2367 0,
2368 0,
2369 0},
2370
2371 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2372 {"abs_g0", 0,
2373 0, /* adr_type */
2374 0,
2375 BFD_RELOC_AARCH64_MOVW_G0,
2376 0,
2377 0,
2378 0},
2379
2380 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2381 {"abs_g0_s", 0,
2382 0, /* adr_type */
2383 0,
2384 BFD_RELOC_AARCH64_MOVW_G0_S,
2385 0,
2386 0,
2387 0},
2388
2389 /* Less significant bits 0-15 of address/value: MOVK, no check */
2390 {"abs_g0_nc", 0,
2391 0, /* adr_type */
2392 0,
2393 BFD_RELOC_AARCH64_MOVW_G0_NC,
2394 0,
2395 0,
2396 0},
2397
2398 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2399 {"abs_g1", 0,
2400 0, /* adr_type */
2401 0,
2402 BFD_RELOC_AARCH64_MOVW_G1,
2403 0,
2404 0,
2405 0},
2406
2407 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2408 {"abs_g1_s", 0,
2409 0, /* adr_type */
2410 0,
2411 BFD_RELOC_AARCH64_MOVW_G1_S,
2412 0,
2413 0,
2414 0},
2415
2416 /* Less significant bits 16-31 of address/value: MOVK, no check */
2417 {"abs_g1_nc", 0,
2418 0, /* adr_type */
2419 0,
2420 BFD_RELOC_AARCH64_MOVW_G1_NC,
2421 0,
2422 0,
2423 0},
2424
2425 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2426 {"abs_g2", 0,
2427 0, /* adr_type */
2428 0,
2429 BFD_RELOC_AARCH64_MOVW_G2,
2430 0,
2431 0,
2432 0},
2433
2434 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2435 {"abs_g2_s", 0,
2436 0, /* adr_type */
2437 0,
2438 BFD_RELOC_AARCH64_MOVW_G2_S,
2439 0,
2440 0,
2441 0},
2442
2443 /* Less significant bits 32-47 of address/value: MOVK, no check */
2444 {"abs_g2_nc", 0,
2445 0, /* adr_type */
2446 0,
2447 BFD_RELOC_AARCH64_MOVW_G2_NC,
2448 0,
2449 0,
2450 0},
2451
2452 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2453 {"abs_g3", 0,
2454 0, /* adr_type */
2455 0,
2456 BFD_RELOC_AARCH64_MOVW_G3,
2457 0,
2458 0,
2459 0},
2460
2461 /* Get to the page containing GOT entry for a symbol. */
2462 {"got", 1,
2463 0, /* adr_type */
2464 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2465 0,
2466 0,
2467 0,
2468 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2469
2470 /* 12 bit offset into the page containing GOT entry for that symbol. */
2471 {"got_lo12", 0,
2472 0, /* adr_type */
2473 0,
2474 0,
2475 0,
2476 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2477 0},
2478
2479 /* Bits 0-15 of address/value: MOVK, no check. */
2480 {"gotoff_g0_nc", 0,
2481 0, /* adr_type */
2482 0,
2483 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2484 0,
2485 0,
2486 0},
2487
2488 /* Most significant bits 16-31 of address/value: MOVZ. */
2489 {"gotoff_g1", 0,
2490 0, /* adr_type */
2491 0,
2492 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2493 0,
2494 0,
2495 0},
2496
2497 /* 15 bit offset into the page containing GOT entry for that symbol. */
2498 {"gotoff_lo15", 0,
2499 0, /* adr_type */
2500 0,
2501 0,
2502 0,
2503 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2504 0},
2505
2506 /* Get to the page containing GOT TLS entry for a symbol */
2507 {"gottprel_g0_nc", 0,
2508 0, /* adr_type */
2509 0,
2510 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2511 0,
2512 0,
2513 0},
2514
2515 /* Get to the page containing GOT TLS entry for a symbol */
2516 {"gottprel_g1", 0,
2517 0, /* adr_type */
2518 0,
2519 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2520 0,
2521 0,
2522 0},
2523
2524 /* Get to the page containing GOT TLS entry for a symbol */
2525 {"tlsgd", 0,
2526 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2527 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2528 0,
2529 0,
2530 0,
2531 0},
2532
2533 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2534 {"tlsgd_lo12", 0,
2535 0, /* adr_type */
2536 0,
2537 0,
2538 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2539 0,
2540 0},
2541
2542 /* Lower 16 bits of address/value: MOVK. */
2543 {"tlsgd_g0_nc", 0,
2544 0, /* adr_type */
2545 0,
2546 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2547 0,
2548 0,
2549 0},
2550
2551 /* Most significant bits 16-31 of address/value: MOVZ. */
2552 {"tlsgd_g1", 0,
2553 0, /* adr_type */
2554 0,
2555 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2556 0,
2557 0,
2558 0},
2559
2560 /* Get to the page containing GOT TLS entry for a symbol */
2561 {"tlsdesc", 0,
2562 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2563 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2564 0,
2565 0,
2566 0,
2567 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2568
2569 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2570 {"tlsdesc_lo12", 0,
2571 0, /* adr_type */
2572 0,
2573 0,
2574 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2575 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2576 0},
2577
2578 /* Get to the page containing GOT TLS entry for a symbol.
2579 As with GD, we allocate two consecutive GOT slots
2580 for the module index and module offset; the only difference
2581 from GD is that the module offset should be initialized to
2582 zero without any outstanding runtime relocation. */
2583 {"tlsldm", 0,
2584 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2585 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2586 0,
2587 0,
2588 0,
2589 0},
2590
2591 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2592 {"tlsldm_lo12_nc", 0,
2593 0, /* adr_type */
2594 0,
2595 0,
2596 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2597 0,
2598 0},
2599
2600 /* 12 bit offset into the module TLS base address. */
2601 {"dtprel_lo12", 0,
2602 0, /* adr_type */
2603 0,
2604 0,
2605 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2606 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2607 0},
2608
2609 /* Same as dtprel_lo12, no overflow check. */
2610 {"dtprel_lo12_nc", 0,
2611 0, /* adr_type */
2612 0,
2613 0,
2614 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2615 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2616 0},
2617
2618 /* bits[23:12] of offset to the module TLS base address. */
2619 {"dtprel_hi12", 0,
2620 0, /* adr_type */
2621 0,
2622 0,
2623 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2624 0,
2625 0},
2626
2627 /* bits[15:0] of offset to the module TLS base address. */
2628 {"dtprel_g0", 0,
2629 0, /* adr_type */
2630 0,
2631 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2632 0,
2633 0,
2634 0},
2635
2636 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2637 {"dtprel_g0_nc", 0,
2638 0, /* adr_type */
2639 0,
2640 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2641 0,
2642 0,
2643 0},
2644
2645 /* bits[31:16] of offset to the module TLS base address. */
2646 {"dtprel_g1", 0,
2647 0, /* adr_type */
2648 0,
2649 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2650 0,
2651 0,
2652 0},
2653
2654 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2655 {"dtprel_g1_nc", 0,
2656 0, /* adr_type */
2657 0,
2658 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2659 0,
2660 0,
2661 0},
2662
2663 /* bits[47:32] of offset to the module TLS base address. */
2664 {"dtprel_g2", 0,
2665 0, /* adr_type */
2666 0,
2667 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2668 0,
2669 0,
2670 0},
2671
2672 /* Lower 16 bit offset into GOT entry for a symbol */
2673 {"tlsdesc_off_g0_nc", 0,
2674 0, /* adr_type */
2675 0,
2676 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2677 0,
2678 0,
2679 0},
2680
2681 /* Higher 16 bit offset into GOT entry for a symbol */
2682 {"tlsdesc_off_g1", 0,
2683 0, /* adr_type */
2684 0,
2685 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2686 0,
2687 0,
2688 0},
2689
2690 /* Get to the page containing GOT TLS entry for a symbol */
2691 {"gottprel", 0,
2692 0, /* adr_type */
2693 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2694 0,
2695 0,
2696 0,
2697 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2698
2699 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2700 {"gottprel_lo12", 0,
2701 0, /* adr_type */
2702 0,
2703 0,
2704 0,
2705 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2706 0},
2707
2708 /* Get tp offset for a symbol. */
2709 {"tprel", 0,
2710 0, /* adr_type */
2711 0,
2712 0,
2713 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2714 0,
2715 0},
2716
2717 /* Get tp offset for a symbol. */
2718 {"tprel_lo12", 0,
2719 0, /* adr_type */
2720 0,
2721 0,
2722 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2723 0,
2724 0},
2725
2726 /* Get tp offset for a symbol. */
2727 {"tprel_hi12", 0,
2728 0, /* adr_type */
2729 0,
2730 0,
2731 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2732 0,
2733 0},
2734
2735 /* Get tp offset for a symbol. */
2736 {"tprel_lo12_nc", 0,
2737 0, /* adr_type */
2738 0,
2739 0,
2740 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2741 0,
2742 0},
2743
2744 /* Most significant bits 32-47 of address/value: MOVZ. */
2745 {"tprel_g2", 0,
2746 0, /* adr_type */
2747 0,
2748 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2749 0,
2750 0,
2751 0},
2752
2753 /* Most significant bits 16-31 of address/value: MOVZ. */
2754 {"tprel_g1", 0,
2755 0, /* adr_type */
2756 0,
2757 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2758 0,
2759 0,
2760 0},
2761
2762 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2763 {"tprel_g1_nc", 0,
2764 0, /* adr_type */
2765 0,
2766 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2767 0,
2768 0,
2769 0},
2770
2771 /* Most significant bits 0-15 of address/value: MOVZ. */
2772 {"tprel_g0", 0,
2773 0, /* adr_type */
2774 0,
2775 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2776 0,
2777 0,
2778 0},
2779
2780 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2781 {"tprel_g0_nc", 0,
2782 0, /* adr_type */
2783 0,
2784 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2785 0,
2786 0,
2787 0},
2788
2789 /* 15-bit offset from GOT entry to base address of GOT table. */
2790 {"gotpage_lo15", 0,
2791 0,
2792 0,
2793 0,
2794 0,
2795 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2796 0},
2797
2798 /* 14-bit offset from GOT entry to base address of GOT table. */
2799 {"gotpage_lo14", 0,
2800 0,
2801 0,
2802 0,
2803 0,
2804 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2805 0},
2806 };
2807
2808 /* Given the address of a pointer pointing to the textual name of a
2809 relocation as may appear in assembler source, attempt to find its
2810 details in reloc_table. The pointer will be updated to the character
2811 after the trailing colon. On failure, NULL will be returned;
2812 otherwise return the reloc_table_entry. */
2813
2814 static struct reloc_table_entry *
2815 find_reloc_table_entry (char **str)
2816 {
2817 unsigned int i;
2818 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2819 {
2820 int length = strlen (reloc_table[i].name);
2821
2822 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2823 && (*str)[length] == ':')
2824 {
2825 *str += (length + 1);
2826 return &reloc_table[i];
2827 }
2828 }
2829
2830 return NULL;
2831 }
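/* Illustrative example (not from the original source): if *STR points at
   "lo12:sym", the call returns the "lo12" table entry and advances *STR
   past the trailing colon so that it points at "sym".  */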
2832
2833 /* Mode argument to parse_shift and parser_shifter_operand. */
2834 enum parse_shift_mode
2835 {
2836 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2837 "#imm{,lsl #n}" */
2838 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2839 "#imm" */
2840 SHIFTED_LSL, /* bare "lsl #n" */
2841 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2842 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2843 };
2844
2845 /* Parse a <shift> operator on an AArch64 data processing instruction.
2846 Return TRUE on success; otherwise return FALSE. */
2847 static bfd_boolean
2848 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2849 {
2850 const struct aarch64_name_value_pair *shift_op;
2851 enum aarch64_modifier_kind kind;
2852 expressionS exp;
2853 int exp_has_prefix;
2854 char *s = *str;
2855 char *p = s;
2856
2857 for (p = *str; ISALPHA (*p); p++)
2858 ;
2859
2860 if (p == *str)
2861 {
2862 set_syntax_error (_("shift expression expected"));
2863 return FALSE;
2864 }
2865
2866 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2867
2868 if (shift_op == NULL)
2869 {
2870 set_syntax_error (_("shift operator expected"));
2871 return FALSE;
2872 }
2873
2874 kind = aarch64_get_operand_modifier (shift_op);
2875
2876 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2877 {
2878 set_syntax_error (_("invalid use of 'MSL'"));
2879 return FALSE;
2880 }
2881
2882 switch (mode)
2883 {
2884 case SHIFTED_LOGIC_IMM:
2885 if (aarch64_extend_operator_p (kind) == TRUE)
2886 {
2887 set_syntax_error (_("extending shift is not permitted"));
2888 return FALSE;
2889 }
2890 break;
2891
2892 case SHIFTED_ARITH_IMM:
2893 if (kind == AARCH64_MOD_ROR)
2894 {
2895 set_syntax_error (_("'ROR' shift is not permitted"));
2896 return FALSE;
2897 }
2898 break;
2899
2900 case SHIFTED_LSL:
2901 if (kind != AARCH64_MOD_LSL)
2902 {
2903 set_syntax_error (_("only 'LSL' shift is permitted"));
2904 return FALSE;
2905 }
2906 break;
2907
2908 case SHIFTED_REG_OFFSET:
2909 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2910 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2911 {
2912 set_fatal_syntax_error
2913 (_("invalid shift for the register offset addressing mode"));
2914 return FALSE;
2915 }
2916 break;
2917
2918 case SHIFTED_LSL_MSL:
2919 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2920 {
2921 set_syntax_error (_("invalid shift operator"));
2922 return FALSE;
2923 }
2924 break;
2925
2926 default:
2927 abort ();
2928 }
2929
2930 /* Whitespace can appear here if the next thing is a bare digit. */
2931 skip_whitespace (p);
2932
2933 /* Parse shift amount. */
2934 exp_has_prefix = 0;
2935 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2936 exp.X_op = O_absent;
2937 else
2938 {
2939 if (is_immediate_prefix (*p))
2940 {
2941 p++;
2942 exp_has_prefix = 1;
2943 }
2944 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2945 }
2946 if (exp.X_op == O_absent)
2947 {
2948 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2949 {
2950 set_syntax_error (_("missing shift amount"));
2951 return FALSE;
2952 }
2953 operand->shifter.amount = 0;
2954 }
2955 else if (exp.X_op != O_constant)
2956 {
2957 set_syntax_error (_("constant shift amount required"));
2958 return FALSE;
2959 }
2960 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2961 {
2962 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2963 return FALSE;
2964 }
2965 else
2966 {
2967 operand->shifter.amount = exp.X_add_number;
2968 operand->shifter.amount_present = 1;
2969 }
2970
2971 operand->shifter.operator_present = 1;
2972 operand->shifter.kind = kind;
2973
2974 *str = p;
2975 return TRUE;
2976 }
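/* Illustrative examples (not from the original source): with mode
   SHIFTED_LSL the text "lsl #3" sets shifter.kind = AARCH64_MOD_LSL and
   shifter.amount = 3; with mode SHIFTED_REG_OFFSET an extend such as
   "sxtw" may appear without an amount (as in "[x0, w1, sxtw]"), in which
   case shifter.amount is left as 0.  */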
2977
2978 /* Parse a <shifter_operand> for a data processing instruction:
2979
2980 #<immediate>
2981 #<immediate>, LSL #imm
2982
2983 Validation of immediate operands is deferred to md_apply_fix.
2984
2985 Return TRUE on success; otherwise return FALSE. */
2986
2987 static bfd_boolean
2988 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2989 enum parse_shift_mode mode)
2990 {
2991 char *p;
2992
2993 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2994 return FALSE;
2995
2996 p = *str;
2997
2998 /* Accept an immediate expression. */
2999 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3000 return FALSE;
3001
3002 /* Accept optional LSL for arithmetic immediate values. */
3003 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3004 if (! parse_shift (&p, operand, SHIFTED_LSL))
3005 return FALSE;
3006
3007 /* Do not accept any shifter for logical immediate values. */
3008 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3009 && parse_shift (&p, operand, mode))
3010 {
3011 set_syntax_error (_("unexpected shift operator"));
3012 return FALSE;
3013 }
3014
3015 *str = p;
3016 return TRUE;
3017 }
3018
3019 /* Parse a <shifter_operand> for a data processing instruction:
3020
3021 <Rm>
3022 <Rm>, <shift>
3023 #<immediate>
3024 #<immediate>, LSL #imm
3025
3026 where <shift> is handled by parse_shift above, and the last two
3027 cases are handled by the function above.
3028
3029 Validation of immediate operands is deferred to md_apply_fix.
3030
3031 Return TRUE on success; otherwise return FALSE. */
3032
3033 static bfd_boolean
3034 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3035 enum parse_shift_mode mode)
3036 {
3037 int reg;
3038 int isreg32, isregzero;
3039 enum aarch64_operand_class opd_class
3040 = aarch64_get_operand_class (operand->type);
3041
3042 if ((reg =
3043 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
3044 {
3045 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3046 {
3047 set_syntax_error (_("unexpected register in the immediate operand"));
3048 return FALSE;
3049 }
3050
3051 if (!isregzero && reg == REG_SP)
3052 {
3053 set_syntax_error (BAD_SP);
3054 return FALSE;
3055 }
3056
3057 operand->reg.regno = reg;
3058 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
3059
3060 /* Accept optional shift operation on register. */
3061 if (! skip_past_comma (str))
3062 return TRUE;
3063
3064 if (! parse_shift (str, operand, mode))
3065 return FALSE;
3066
3067 return TRUE;
3068 }
3069 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3070 {
3071 set_syntax_error
3072 (_("integer register expected in the extended/shifted operand "
3073 "register"));
3074 return FALSE;
3075 }
3076
3077 /* We have a shifted immediate variable. */
3078 return parse_shifter_operand_imm (str, operand, mode);
3079 }
3080
3081 /* Return TRUE on success; return FALSE otherwise. */
3082
3083 static bfd_boolean
3084 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3085 enum parse_shift_mode mode)
3086 {
3087 char *p = *str;
3088
3089 /* Determine if we have the sequence of characters #: or just :
3090 coming next. If we do, then we check for a :rello: relocation
3091 modifier. If we don't, punt the whole lot to
3092 parse_shifter_operand. */
3093
3094 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3095 {
3096 struct reloc_table_entry *entry;
3097
3098 if (p[0] == '#')
3099 p += 2;
3100 else
3101 p++;
3102 *str = p;
3103
3104 /* Try to parse a relocation. Anything else is an error. */
3105 if (!(entry = find_reloc_table_entry (str)))
3106 {
3107 set_syntax_error (_("unknown relocation modifier"));
3108 return FALSE;
3109 }
3110
3111 if (entry->add_type == 0)
3112 {
3113 set_syntax_error
3114 (_("this relocation modifier is not allowed on this instruction"));
3115 return FALSE;
3116 }
3117
3118 /* Save str before we decompose it. */
3119 p = *str;
3120
3121 /* Next, we parse the expression. */
3122 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3123 return FALSE;
3124
3125 /* Record the relocation type (use the ADD variant here). */
3126 inst.reloc.type = entry->add_type;
3127 inst.reloc.pc_rel = entry->pc_rel;
3128
3129 /* If str is empty, we've reached the end; stop here. */
3130 if (**str == '\0')
3131 return TRUE;
3132
3133 /* Otherwise, we have a shifted reloc modifier, so rewind to
3134 recover the variable name and continue parsing for the shifter. */
3135 *str = p;
3136 return parse_shifter_operand_imm (str, operand, mode);
3137 }
3138
3139 return parse_shifter_operand (str, operand, mode);
3140 }
3141
3142 /* Parse all forms of an address expression. Information is written
3143 to *OPERAND and/or inst.reloc.
3144
3145 The A64 instruction set has the following addressing modes:
3146
3147 Offset
3148 [base] // in SIMD ld/st structure
3149 [base{,#0}] // in ld/st exclusive
3150 [base{,#imm}]
3151 [base,Xm{,LSL #imm}]
3152 [base,Xm,SXTX {#imm}]
3153 [base,Wm,(S|U)XTW {#imm}]
3154 Pre-indexed
3155 [base,#imm]!
3156 Post-indexed
3157 [base],#imm
3158 [base],Xm // in SIMD ld/st structure
3159 PC-relative (literal)
3160 label
3161 =immediate
3162
3163 (As a convenience, the notation "=immediate" is permitted in conjunction
3164 with the pc-relative literal load instructions to automatically place an
3165 immediate value or symbolic address in a nearby literal pool and generate
3166 a hidden label which references it.)
3167
3168 Upon a successful parsing, the address structure in *OPERAND will be
3169 filled in the following way:
3170
3171 .base_regno = <base>
3172 .offset.is_reg // 1 if the offset is a register
3173 .offset.imm = <imm>
3174 .offset.regno = <Rm>
3175
3176 For different addressing modes defined in the A64 ISA:
3177
3178 Offset
3179 .pcrel=0; .preind=1; .postind=0; .writeback=0
3180 Pre-indexed
3181 .pcrel=0; .preind=1; .postind=0; .writeback=1
3182 Post-indexed
3183 .pcrel=0; .preind=0; .postind=1; .writeback=1
3184 PC-relative (literal)
3185 .pcrel=1; .preind=1; .postind=0; .writeback=0
3186
3187 The shift/extension information, if any, will be stored in .shifter.
3188
3189 It is the caller's responsibility to check for addressing modes not
3190 supported by the instruction, and to set inst.reloc.type. */
3191
3192 static bfd_boolean
3193 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3194 int accept_reg_post_index)
3195 {
3196 char *p = *str;
3197 int reg;
3198 int isreg32, isregzero;
3199 expressionS *exp = &inst.reloc.exp;
3200
3201 if (! skip_past_char (&p, '['))
3202 {
3203 /* =immediate or label. */
3204 operand->addr.pcrel = 1;
3205 operand->addr.preind = 1;
3206
3207 /* #:<reloc_op>:<symbol> */
3208 skip_past_char (&p, '#');
3209 if (reloc && skip_past_char (&p, ':'))
3210 {
3211 bfd_reloc_code_real_type ty;
3212 struct reloc_table_entry *entry;
3213
3214 /* Try to parse a relocation modifier. Anything else is
3215 an error. */
3216 entry = find_reloc_table_entry (&p);
3217 if (! entry)
3218 {
3219 set_syntax_error (_("unknown relocation modifier"));
3220 return FALSE;
3221 }
3222
3223 switch (operand->type)
3224 {
3225 case AARCH64_OPND_ADDR_PCREL21:
3226 /* adr */
3227 ty = entry->adr_type;
3228 break;
3229
3230 default:
3231 ty = entry->ld_literal_type;
3232 break;
3233 }
3234
3235 if (ty == 0)
3236 {
3237 set_syntax_error
3238 (_("this relocation modifier is not allowed on this "
3239 "instruction"));
3240 return FALSE;
3241 }
3242
3243 /* #:<reloc_op>: */
3244 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3245 {
3246 set_syntax_error (_("invalid relocation expression"));
3247 return FALSE;
3248 }
3249
3250 /* #:<reloc_op>:<expr> */
3251 /* Record the relocation type. */
3252 inst.reloc.type = ty;
3253 inst.reloc.pc_rel = entry->pc_rel;
3254 }
3255 else
3256 {
3257
3258 if (skip_past_char (&p, '='))
3259 /* =immediate; need to generate the literal in the literal pool. */
3260 inst.gen_lit_pool = 1;
3261
3262 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3263 {
3264 set_syntax_error (_("invalid address"));
3265 return FALSE;
3266 }
3267 }
3268
3269 *str = p;
3270 return TRUE;
3271 }
3272
3273 /* [ */
3274
3275 /* Accept SP and reject ZR */
3276 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3277 if (reg == PARSE_FAIL || isreg32)
3278 {
3279 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3280 return FALSE;
3281 }
3282 operand->addr.base_regno = reg;
3283
3284 /* [Xn */
3285 if (skip_past_comma (&p))
3286 {
3287 /* [Xn, */
3288 operand->addr.preind = 1;
3289
3290 /* Reject SP and accept ZR */
3291 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3292 if (reg != PARSE_FAIL)
3293 {
3294 /* [Xn,Rm */
3295 operand->addr.offset.regno = reg;
3296 operand->addr.offset.is_reg = 1;
3297 /* Shifted index. */
3298 if (skip_past_comma (&p))
3299 {
3300 /* [Xn,Rm, */
3301 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3302 /* Use the diagnostics set in parse_shift, so do not set a
3303 new error message here. */
3304 return FALSE;
3305 }
3306 /* We only accept:
3307 [base,Xm{,LSL #imm}]
3308 [base,Xm,SXTX {#imm}]
3309 [base,Wm,(S|U)XTW {#imm}] */
3310 if (operand->shifter.kind == AARCH64_MOD_NONE
3311 || operand->shifter.kind == AARCH64_MOD_LSL
3312 || operand->shifter.kind == AARCH64_MOD_SXTX)
3313 {
3314 if (isreg32)
3315 {
3316 set_syntax_error (_("invalid use of 32-bit register offset"));
3317 return FALSE;
3318 }
3319 }
3320 else if (!isreg32)
3321 {
3322 set_syntax_error (_("invalid use of 64-bit register offset"));
3323 return FALSE;
3324 }
3325 }
3326 else
3327 {
3328 /* [Xn,#:<reloc_op>:<symbol> */
3329 skip_past_char (&p, '#');
3330 if (reloc && skip_past_char (&p, ':'))
3331 {
3332 struct reloc_table_entry *entry;
3333
3334 /* Try to parse a relocation modifier. Anything else is
3335 an error. */
3336 if (!(entry = find_reloc_table_entry (&p)))
3337 {
3338 set_syntax_error (_("unknown relocation modifier"));
3339 return FALSE;
3340 }
3341
3342 if (entry->ldst_type == 0)
3343 {
3344 set_syntax_error
3345 (_("this relocation modifier is not allowed on this "
3346 "instruction"));
3347 return FALSE;
3348 }
3349
3350 /* [Xn,#:<reloc_op>: */
3351 /* We now have the group relocation table entry corresponding to
3352 the name in the assembler source. Next, we parse the
3353 expression. */
3354 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3355 {
3356 set_syntax_error (_("invalid relocation expression"));
3357 return FALSE;
3358 }
3359
3360 /* [Xn,#:<reloc_op>:<expr> */
3361 /* Record the load/store relocation type. */
3362 inst.reloc.type = entry->ldst_type;
3363 inst.reloc.pc_rel = entry->pc_rel;
3364 }
3365 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3366 {
3367 set_syntax_error (_("invalid expression in the address"));
3368 return FALSE;
3369 }
3370 /* [Xn,<expr> */
3371 }
3372 }
3373
3374 if (! skip_past_char (&p, ']'))
3375 {
3376 set_syntax_error (_("']' expected"));
3377 return FALSE;
3378 }
3379
3380 if (skip_past_char (&p, '!'))
3381 {
3382 if (operand->addr.preind && operand->addr.offset.is_reg)
3383 {
3384 set_syntax_error (_("register offset not allowed in pre-indexed "
3385 "addressing mode"));
3386 return FALSE;
3387 }
3388 /* [Xn]! */
3389 operand->addr.writeback = 1;
3390 }
3391 else if (skip_past_comma (&p))
3392 {
3393 /* [Xn], */
3394 operand->addr.postind = 1;
3395 operand->addr.writeback = 1;
3396
3397 if (operand->addr.preind)
3398 {
3399 set_syntax_error (_("cannot combine pre- and post-indexing"));
3400 return FALSE;
3401 }
3402
3403 if (accept_reg_post_index
3404 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3405 &isregzero)) != PARSE_FAIL)
3406 {
3407 /* [Xn],Xm */
3408 if (isreg32)
3409 {
3410 set_syntax_error (_("invalid 32-bit register offset"));
3411 return FALSE;
3412 }
3413 operand->addr.offset.regno = reg;
3414 operand->addr.offset.is_reg = 1;
3415 }
3416 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3417 {
3418 /* [Xn],#expr */
3419 set_syntax_error (_("invalid expression in the address"));
3420 return FALSE;
3421 }
3422 }
3423
3424 /* If at this point neither .preind nor .postind is set, we have a
3425 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3426 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3427 {
3428 if (operand->addr.writeback)
3429 {
3430 /* Reject [Rn]! */
3431 set_syntax_error (_("missing offset in the pre-indexed address"));
3432 return FALSE;
3433 }
3434 operand->addr.preind = 1;
3435 inst.reloc.exp.X_op = O_constant;
3436 inst.reloc.exp.X_add_number = 0;
3437 }
3438
3439 *str = p;
3440 return TRUE;
3441 }
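/* Illustrative example (not from the original source): for the operand text
   "[x1, w2, sxtw #2]" the parse leaves .base_regno = 1, .offset.regno = 2,
   .offset.is_reg = 1 and .preind = 1, with shifter.kind = AARCH64_MOD_SXTW
   and shifter.amount = 2.  */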
3442
3443 /* Return TRUE on success; otherwise return FALSE. */
3444 static bfd_boolean
3445 parse_address (char **str, aarch64_opnd_info *operand,
3446 int accept_reg_post_index)
3447 {
3448 return parse_address_main (str, operand, 0, accept_reg_post_index);
3449 }
3450
3451 /* Return TRUE on success; otherwise return FALSE. */
3452 static bfd_boolean
3453 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3454 {
3455 return parse_address_main (str, operand, 1, 0);
3456 }
3457
3458 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3459 Return TRUE on success; otherwise return FALSE. */
3460 static bfd_boolean
3461 parse_half (char **str, int *internal_fixup_p)
3462 {
3463 char *p = *str;
3464
3465 skip_past_char (&p, '#');
3466
3467 gas_assert (internal_fixup_p);
3468 *internal_fixup_p = 0;
3469
3470 if (*p == ':')
3471 {
3472 struct reloc_table_entry *entry;
3473
3474 /* Try to parse a relocation. Anything else is an error. */
3475 ++p;
3476 if (!(entry = find_reloc_table_entry (&p)))
3477 {
3478 set_syntax_error (_("unknown relocation modifier"));
3479 return FALSE;
3480 }
3481
3482 if (entry->movw_type == 0)
3483 {
3484 set_syntax_error
3485 (_("this relocation modifier is not allowed on this instruction"));
3486 return FALSE;
3487 }
3488
3489 inst.reloc.type = entry->movw_type;
3490 }
3491 else
3492 *internal_fixup_p = 1;
3493
3494 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3495 return FALSE;
3496
3497 *str = p;
3498 return TRUE;
3499 }
3500
3501 /* Parse an operand for an ADRP instruction:
3502 ADRP <Xd>, <label>
3503 Return TRUE on success; otherwise return FALSE. */
3504
3505 static bfd_boolean
3506 parse_adrp (char **str)
3507 {
3508 char *p;
3509
3510 p = *str;
3511 if (*p == ':')
3512 {
3513 struct reloc_table_entry *entry;
3514
3515 /* Try to parse a relocation. Anything else is an error. */
3516 ++p;
3517 if (!(entry = find_reloc_table_entry (&p)))
3518 {
3519 set_syntax_error (_("unknown relocation modifier"));
3520 return FALSE;
3521 }
3522
3523 if (entry->adrp_type == 0)
3524 {
3525 set_syntax_error
3526 (_("this relocation modifier is not allowed on this instruction"));
3527 return FALSE;
3528 }
3529
3530 inst.reloc.type = entry->adrp_type;
3531 }
3532 else
3533 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3534
3535 inst.reloc.pc_rel = 1;
3536
3537 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3538 return FALSE;
3539
3540 *str = p;
3541 return TRUE;
3542 }
3543
3544 /* Miscellaneous. */
3545
3546 /* Parse an option for a preload instruction. Returns the encoding for the
3547 option, or PARSE_FAIL. */
3548
3549 static int
3550 parse_pldop (char **str)
3551 {
3552 char *p, *q;
3553 const struct aarch64_name_value_pair *o;
3554
3555 p = q = *str;
3556 while (ISALNUM (*q))
3557 q++;
3558
3559 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3560 if (!o)
3561 return PARSE_FAIL;
3562
3563 *str = q;
3564 return o->value;
3565 }
3566
3567 /* Parse an option for a barrier instruction. Returns the encoding for the
3568 option, or PARSE_FAIL. */
3569
3570 static int
3571 parse_barrier (char **str)
3572 {
3573 char *p, *q;
3574 const asm_barrier_opt *o;
3575
3576 p = q = *str;
3577 while (ISALPHA (*q))
3578 q++;
3579
3580 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3581 if (!o)
3582 return PARSE_FAIL;
3583
3584 *str = q;
3585 return o->value;
3586 }
3587
3588 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3589 and return 0 if successful; otherwise return PARSE_FAIL. */
3590
3591 static int
3592 parse_barrier_psb (char **str,
3593 const struct aarch64_name_value_pair ** hint_opt)
3594 {
3595 char *p, *q;
3596 const struct aarch64_name_value_pair *o;
3597
3598 p = q = *str;
3599 while (ISALPHA (*q))
3600 q++;
3601
3602 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3603 if (!o)
3604 {
3605 set_fatal_syntax_error
3606 ( _("unknown or missing option to PSB"));
3607 return PARSE_FAIL;
3608 }
3609
3610 if (o->value != 0x11)
3611 {
3612 /* PSB only accepts option name 'CSYNC'. */
3613 set_syntax_error
3614 (_("the specified option is not accepted for PSB"));
3615 return PARSE_FAIL;
3616 }
3617
3618 *str = q;
3619 *hint_opt = o;
3620 return 0;
3621 }
3622
3623 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3624 Returns the encoding for the option, or PARSE_FAIL.
3625
3626 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3627 implementation-defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3628
3629 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3630 field, otherwise as a system register.
3631 */
3632
3633 static int
3634 parse_sys_reg (char **str, struct hash_control *sys_regs,
3635 int imple_defined_p, int pstatefield_p)
3636 {
3637 char *p, *q;
3638 char buf[32];
3639 const aarch64_sys_reg *o;
3640 int value;
3641
3642 p = buf;
3643 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3644 if (p < buf + 31)
3645 *p++ = TOLOWER (*q);
3646 *p = '\0';
3647 /* Assert that BUF is large enough. */
3648 gas_assert (p - buf == q - *str);
3649
3650 o = hash_find (sys_regs, buf);
3651 if (!o)
3652 {
3653 if (!imple_defined_p)
3654 return PARSE_FAIL;
3655 else
3656 {
3657 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3658 unsigned int op0, op1, cn, cm, op2;
3659
3660 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3661 != 5)
3662 return PARSE_FAIL;
3663 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3664 return PARSE_FAIL;
3665 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3666 }
3667 }
3668 else
3669 {
3670 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3671 as_bad (_("selected processor does not support PSTATE field "
3672 "name '%s'"), buf);
3673 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3674 as_bad (_("selected processor does not support system register "
3675 "name '%s'"), buf);
3676 if (aarch64_sys_reg_deprecated_p (o))
3677 as_warn (_("system register name '%s' is deprecated and may be "
3678 "removed in a future release"), buf);
3679 value = o->value;
3680 }
3681
3682 *str = q;
3683 return value;
3684 }
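/* Worked example (for illustration only): with IMPLE_DEFINED_P non-zero,
   a name such as "s3_0_c13_c0_2" that is absent from the hash table is
   parsed as op0=3, op1=0, Cn=13, Cm=0, op2=2 and encoded as
   (3 << 14) | (0 << 11) | (13 << 7) | (0 << 3) | 2 = 0xc682.  */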
3685
3686 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3687 for the option, or NULL. */
3688
3689 static const aarch64_sys_ins_reg *
3690 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3691 {
3692 char *p, *q;
3693 char buf[32];
3694 const aarch64_sys_ins_reg *o;
3695
3696 p = buf;
3697 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3698 if (p < buf + 31)
3699 *p++ = TOLOWER (*q);
3700 *p = '\0';
3701
3702 o = hash_find (sys_ins_regs, buf);
3703 if (!o)
3704 return NULL;
3705
3706 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3707 as_bad (_("selected processor does not support system register "
3708 "name '%s'"), buf);
3709
3710 *str = q;
3711 return o;
3712 }
3713 \f
3714 #define po_char_or_fail(chr) do { \
3715 if (! skip_past_char (&str, chr)) \
3716 goto failure; \
3717 } while (0)
3718
3719 #define po_reg_or_fail(regtype) do { \
3720 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3721 if (val == PARSE_FAIL) \
3722 { \
3723 set_default_error (); \
3724 goto failure; \
3725 } \
3726 } while (0)
3727
3728 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3729 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3730 &isreg32, &isregzero); \
3731 if (val == PARSE_FAIL) \
3732 { \
3733 set_default_error (); \
3734 goto failure; \
3735 } \
3736 info->reg.regno = val; \
3737 if (isreg32) \
3738 info->qualifier = AARCH64_OPND_QLF_W; \
3739 else \
3740 info->qualifier = AARCH64_OPND_QLF_X; \
3741 } while (0)
3742
3743 #define po_imm_nc_or_fail() do { \
3744 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
3745 goto failure; \
3746 } while (0)
3747
3748 #define po_imm_or_fail(min, max) do { \
3749 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
3750 goto failure; \
3751 if (val < min || val > max) \
3752 { \
3753 set_fatal_syntax_error (_("immediate value out of range "\
3754 #min " to "#max)); \
3755 goto failure; \
3756 } \
3757 } while (0)
3758
3759 #define po_misc_or_fail(expr) do { \
3760 if (!expr) \
3761 goto failure; \
3762 } while (0)
3763 \f
3764 /* Encode the 12-bit imm field of Add/sub immediate. */
3765 static inline uint32_t
3766 encode_addsub_imm (uint32_t imm)
3767 {
3768 return imm << 10;
3769 }
3770
3771 /* Encode the shift amount field of Add/sub immediate. */
3772 static inline uint32_t
3773 encode_addsub_imm_shift_amount (uint32_t cnt)
3774 {
3775 return cnt << 22;
3776 }
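/* Illustrative example (not from the original source): an instruction such
   as "add x0, x1, #0x123, lsl #12" would use encode_addsub_imm (0x123) for
   the imm12 field (bits [21:10]) and encode_addsub_imm_shift_amount (1) to
   set the shift field (bit [22]).  */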
3777
3778
3779 /* Encode the imm field of the Adr instruction. */
3780 static inline uint32_t
3781 encode_adr_imm (uint32_t imm)
3782 {
3783 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3784 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3785 }
3786
3787 /* Encode the immediate field of Move wide immediate. */
3788 static inline uint32_t
3789 encode_movw_imm (uint32_t imm)
3790 {
3791 return imm << 5;
3792 }
3793
3794 /* Encode the 26-bit offset of unconditional branch. */
3795 static inline uint32_t
3796 encode_branch_ofs_26 (uint32_t ofs)
3797 {
3798 return ofs & ((1 << 26) - 1);
3799 }
3800
3801 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3802 static inline uint32_t
3803 encode_cond_branch_ofs_19 (uint32_t ofs)
3804 {
3805 return (ofs & ((1 << 19) - 1)) << 5;
3806 }
3807
3808 /* Encode the 19-bit offset of ld literal. */
3809 static inline uint32_t
3810 encode_ld_lit_ofs_19 (uint32_t ofs)
3811 {
3812 return (ofs & ((1 << 19) - 1)) << 5;
3813 }
3814
3815 /* Encode the 14-bit offset of test & branch. */
3816 static inline uint32_t
3817 encode_tst_branch_ofs_14 (uint32_t ofs)
3818 {
3819 return (ofs & ((1 << 14) - 1)) << 5;
3820 }
3821
3822 /* Encode the 16-bit imm field of svc/hvc/smc. */
3823 static inline uint32_t
3824 encode_svc_imm (uint32_t imm)
3825 {
3826 return imm << 5;
3827 }
3828
3829 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3830 static inline uint32_t
3831 reencode_addsub_switch_add_sub (uint32_t opcode)
3832 {
3833 return opcode ^ (1 << 30);
3834 }
3835
3836 static inline uint32_t
3837 reencode_movzn_to_movz (uint32_t opcode)
3838 {
3839 return opcode | (1 << 30);
3840 }
3841
3842 static inline uint32_t
3843 reencode_movzn_to_movn (uint32_t opcode)
3844 {
3845 return opcode & ~(1 << 30);
3846 }
3847
3848 /* Overall per-instruction processing. */
3849
3850 /* We need to be able to fix up arbitrary expressions in some statements.
3851 This is so that we can handle symbols that are an arbitrary distance from
3852 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3853 which returns part of an address in a form which will be valid for
3854 a data instruction. We do this by pushing the expression into a symbol
3855 in the expr_section, and creating a fix for that. */
3856
3857 static fixS *
3858 fix_new_aarch64 (fragS * frag,
3859 int where,
3860 short int size, expressionS * exp, int pc_rel, int reloc)
3861 {
3862 fixS *new_fix;
3863
3864 switch (exp->X_op)
3865 {
3866 case O_constant:
3867 case O_symbol:
3868 case O_add:
3869 case O_subtract:
3870 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3871 break;
3872
3873 default:
3874 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3875 pc_rel, reloc);
3876 break;
3877 }
3878 return new_fix;
3879 }
3880 \f
3881 /* Diagnostics on operand errors. */
3882
3883 /* By default, output a verbose error message.
3884 Disable verbose error messages with -mno-verbose-error. */
3885 static int verbose_error_p = 1;
3886
3887 #ifdef DEBUG_AARCH64
3888 /* N.B. this is only for the purpose of debugging. */
3889 const char* operand_mismatch_kind_names[] =
3890 {
3891 "AARCH64_OPDE_NIL",
3892 "AARCH64_OPDE_RECOVERABLE",
3893 "AARCH64_OPDE_SYNTAX_ERROR",
3894 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3895 "AARCH64_OPDE_INVALID_VARIANT",
3896 "AARCH64_OPDE_OUT_OF_RANGE",
3897 "AARCH64_OPDE_UNALIGNED",
3898 "AARCH64_OPDE_REG_LIST",
3899 "AARCH64_OPDE_OTHER_ERROR",
3900 };
3901 #endif /* DEBUG_AARCH64 */
3902
3903 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3904
3905 When multiple errors of different kinds are found in the same assembly
3906 line, only the error of the highest severity will be picked up for
3907 issuing the diagnostics. */
3908
3909 static inline bfd_boolean
3910 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3911 enum aarch64_operand_error_kind rhs)
3912 {
3913 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3914 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3915 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3916 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3917 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3918 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3919 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3920 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3921 return lhs > rhs;
3922 }
3923
3924 /* Helper routine to get the mnemonic name from the assembly instruction
3925 line; should only be called for diagnostic purposes, as a string
3926 copy operation is involved, which may affect the runtime
3927 performance if used elsewhere. */
3928
3929 static const char*
3930 get_mnemonic_name (const char *str)
3931 {
3932 static char mnemonic[32];
3933 char *ptr;
3934
3935 /* Get the first 31 characters and assume that the full name is included. */
3936 strncpy (mnemonic, str, 31);
3937 mnemonic[31] = '\0';
3938
3939 /* Scan up to the end of the mnemonic, which must end in white space,
3940 '.', or end of string. */
3941 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3942 ;
3943
3944 *ptr = '\0';
3945
3946 /* Append '...' to the truncated long name. */
3947 if (ptr - mnemonic == 31)
3948 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3949
3950 return mnemonic;
3951 }
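/* Illustrative example (not from the original source): for the line
   "ldr x0, [x1]" this returns the string "ldr".  */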
3952
3953 static void
3954 reset_aarch64_instruction (aarch64_instruction *instruction)
3955 {
3956 memset (instruction, '\0', sizeof (aarch64_instruction));
3957 instruction->reloc.type = BFD_RELOC_UNUSED;
3958 }
3959
3960 /* Data structures storing one user error in the assembly code related to
3961 operands. */
3962
3963 struct operand_error_record
3964 {
3965 const aarch64_opcode *opcode;
3966 aarch64_operand_error detail;
3967 struct operand_error_record *next;
3968 };
3969
3970 typedef struct operand_error_record operand_error_record;
3971
3972 struct operand_errors
3973 {
3974 operand_error_record *head;
3975 operand_error_record *tail;
3976 };
3977
3978 typedef struct operand_errors operand_errors;
3979
3980 /* Top-level data structure reporting user errors for the current line of
3981 the assembly code.
3982 The way md_assemble works is that all opcodes sharing the same mnemonic
3983 name are iterated to find a match to the assembly line. In this data
3984 structure, each such opcode will have one operand_error_record
3985 allocated and inserted. In other words, excess errors related to
3986 a single opcode are disregarded. */
3987 operand_errors operand_error_report;
3988
3989 /* Free record nodes. */
3990 static operand_error_record *free_opnd_error_record_nodes = NULL;
3991
3992 /* Initialize the data structure that stores the operand mismatch
3993 information on assembling one line of the assembly code. */
3994 static void
3995 init_operand_error_report (void)
3996 {
3997 if (operand_error_report.head != NULL)
3998 {
3999 gas_assert (operand_error_report.tail != NULL);
4000 operand_error_report.tail->next = free_opnd_error_record_nodes;
4001 free_opnd_error_record_nodes = operand_error_report.head;
4002 operand_error_report.head = NULL;
4003 operand_error_report.tail = NULL;
4004 return;
4005 }
4006 gas_assert (operand_error_report.tail == NULL);
4007 }
4008
4009 /* Return TRUE if some operand error has been recorded during the
4010 parsing of the current assembly line using the opcode *OPCODE;
4011 otherwise return FALSE. */
4012 static inline bfd_boolean
4013 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4014 {
4015 operand_error_record *record = operand_error_report.head;
4016 return record && record->opcode == opcode;
4017 }
4018
4019 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4020 OPCODE field is initialized with OPCODE.
4021 N.B. there is only one record for each opcode, i.e. at most one error is
4022 recorded for each instruction template. */
4023
4024 static void
4025 add_operand_error_record (const operand_error_record* new_record)
4026 {
4027 const aarch64_opcode *opcode = new_record->opcode;
4028 operand_error_record* record = operand_error_report.head;
4029
4030 /* The record may have been created for this opcode. If not, we need
4031 to prepare one. */
4032 if (! opcode_has_operand_error_p (opcode))
4033 {
4034 /* Get one empty record. */
4035 if (free_opnd_error_record_nodes == NULL)
4036 {
4037 record = XNEW (operand_error_record);
4038 }
4039 else
4040 {
4041 record = free_opnd_error_record_nodes;
4042 free_opnd_error_record_nodes = record->next;
4043 }
4044 record->opcode = opcode;
4045 /* Insert at the head. */
4046 record->next = operand_error_report.head;
4047 operand_error_report.head = record;
4048 if (operand_error_report.tail == NULL)
4049 operand_error_report.tail = record;
4050 }
4051 else if (record->detail.kind != AARCH64_OPDE_NIL
4052 && record->detail.index <= new_record->detail.index
4053 && operand_error_higher_severity_p (record->detail.kind,
4054 new_record->detail.kind))
4055 {
4056 /* In the case of multiple errors found on operands related to a
4057 single opcode, only record the error of the leftmost operand and
4058 only if the error is of higher severity. */
4059 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4060 " the existing error %s on operand %d",
4061 operand_mismatch_kind_names[new_record->detail.kind],
4062 new_record->detail.index,
4063 operand_mismatch_kind_names[record->detail.kind],
4064 record->detail.index);
4065 return;
4066 }
4067
4068 record->detail = new_record->detail;
4069 }
4070
4071 static inline void
4072 record_operand_error_info (const aarch64_opcode *opcode,
4073 aarch64_operand_error *error_info)
4074 {
4075 operand_error_record record;
4076 record.opcode = opcode;
4077 record.detail = *error_info;
4078 add_operand_error_record (&record);
4079 }
4080
4081 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4082 error message *ERROR, for operand IDX (count from 0). */
4083
4084 static void
4085 record_operand_error (const aarch64_opcode *opcode, int idx,
4086 enum aarch64_operand_error_kind kind,
4087 const char* error)
4088 {
4089 aarch64_operand_error info;
4090 memset(&info, 0, sizeof (info));
4091 info.index = idx;
4092 info.kind = kind;
4093 info.error = error;
4094 record_operand_error_info (opcode, &info);
4095 }
4096
4097 static void
4098 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4099 enum aarch64_operand_error_kind kind,
4100 const char* error, const int *extra_data)
4101 {
4102 aarch64_operand_error info;
4103 info.index = idx;
4104 info.kind = kind;
4105 info.error = error;
4106 info.data[0] = extra_data[0];
4107 info.data[1] = extra_data[1];
4108 info.data[2] = extra_data[2];
4109 record_operand_error_info (opcode, &info);
4110 }
4111
4112 static void
4113 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4114 const char* error, int lower_bound,
4115 int upper_bound)
4116 {
4117 int data[3] = {lower_bound, upper_bound, 0};
4118 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4119 error, data);
4120 }
4121
4122 /* Remove the operand error record for *OPCODE. */
4123 static void ATTRIBUTE_UNUSED
4124 remove_operand_error_record (const aarch64_opcode *opcode)
4125 {
4126 if (opcode_has_operand_error_p (opcode))
4127 {
4128 operand_error_record* record = operand_error_report.head;
4129 gas_assert (record != NULL && operand_error_report.tail != NULL);
4130 operand_error_report.head = record->next;
4131 record->next = free_opnd_error_record_nodes;
4132 free_opnd_error_record_nodes = record;
4133 if (operand_error_report.head == NULL)
4134 {
4135 gas_assert (operand_error_report.tail == record);
4136 operand_error_report.tail = NULL;
4137 }
4138 }
4139 }
4140
4141 /* Given the instruction in *INSTR, return the index of the best matched
4142 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4143
4144 Return -1 if there is no qualifier sequence; return the first match
4145 if multiple matches are found. */
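/* Illustrative example (not taken from the real opcode table): if the
   qualifier list for an FADD-like template were { S_S, S_S, S_S } and
   { S_D, S_D, S_D }, then for an instruction parsed as
   "fadd s0, s1, d2" the first sequence matches two operand qualifiers
   and the second matches only one, so index 0 would be returned.  */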
4146
4147 static int
4148 find_best_match (const aarch64_inst *instr,
4149 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4150 {
4151 int i, num_opnds, max_num_matched, idx;
4152
4153 num_opnds = aarch64_num_of_operands (instr->opcode);
4154 if (num_opnds == 0)
4155 {
4156 DEBUG_TRACE ("no operand");
4157 return -1;
4158 }
4159
4160 max_num_matched = 0;
4161 idx = -1;
4162
4163 /* For each pattern. */
4164 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4165 {
4166 int j, num_matched;
4167 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4168
4169 /* Most opcodes have far fewer patterns in the list. */
4170 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4171 {
4172 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4173 if (i != 0 && idx == -1)
4174 /* If nothing has been matched, return the 1st sequence. */
4175 idx = 0;
4176 break;
4177 }
4178
4179 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4180 if (*qualifiers == instr->operands[j].qualifier)
4181 ++num_matched;
4182
4183 if (num_matched > max_num_matched)
4184 {
4185 max_num_matched = num_matched;
4186 idx = i;
4187 }
4188 }
4189
4190 DEBUG_TRACE ("return with %d", idx);
4191 return idx;
4192 }
4193
4194 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4195 corresponding operands in *INSTR. */
4196
4197 static inline void
4198 assign_qualifier_sequence (aarch64_inst *instr,
4199 const aarch64_opnd_qualifier_t *qualifiers)
4200 {
4201 int i = 0;
4202 int num_opnds = aarch64_num_of_operands (instr->opcode);
4203 gas_assert (num_opnds);
4204 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4205 instr->operands[i].qualifier = *qualifiers;
4206 }
4207
4208 /* Print operands for diagnostic purposes. */
4209
4210 static void
4211 print_operands (char *buf, const aarch64_opcode *opcode,
4212 const aarch64_opnd_info *opnds)
4213 {
4214 int i;
4215
4216 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4217 {
4218 char str[128];
4219
4220 /* We rely primarily on the opcode's operand info; however, we also look
4221 into inst->operands so that an omitted optional operand can still be
4222 printed.
4223 The two operand codes should be the same in all cases, apart from
4224 when the operand can be optional. */
4225 if (opcode->operands[i] == AARCH64_OPND_NIL
4226 || opnds[i].type == AARCH64_OPND_NIL)
4227 break;
4228
4229 /* Generate the operand string in STR. */
4230 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4231
4232 /* Delimiter. */
4233 if (str[0] != '\0')
4234 strcat (buf, i == 0 ? " " : ",");
4235
4236 /* Append the operand string. */
4237 strcat (buf, str);
4238 }
4239 }
4240
4241 /* Send to stderr a string as information. */
4242
4243 static void
4244 output_info (const char *format, ...)
4245 {
4246 const char *file;
4247 unsigned int line;
4248 va_list args;
4249
4250 file = as_where (&line);
4251 if (file)
4252 {
4253 if (line != 0)
4254 fprintf (stderr, "%s:%u: ", file, line);
4255 else
4256 fprintf (stderr, "%s: ", file);
4257 }
4258 fprintf (stderr, _("Info: "));
4259 va_start (args, format);
4260 vfprintf (stderr, format, args);
4261 va_end (args);
4262 (void) putc ('\n', stderr);
4263 }
4264
4265 /* Output one operand error record. */
4266
4267 static void
4268 output_operand_error_record (const operand_error_record *record, char *str)
4269 {
4270 const aarch64_operand_error *detail = &record->detail;
4271 int idx = detail->index;
4272 const aarch64_opcode *opcode = record->opcode;
4273 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4274 : AARCH64_OPND_NIL);
4275
4276 switch (detail->kind)
4277 {
4278 case AARCH64_OPDE_NIL:
4279 gas_assert (0);
4280 break;
4281
4282 case AARCH64_OPDE_SYNTAX_ERROR:
4283 case AARCH64_OPDE_RECOVERABLE:
4284 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4285 case AARCH64_OPDE_OTHER_ERROR:
4286 /* Use the prepared error message if there is one; otherwise use the
4287 operand description string to describe the error. */
4288 if (detail->error != NULL)
4289 {
4290 if (idx < 0)
4291 as_bad (_("%s -- `%s'"), detail->error, str);
4292 else
4293 as_bad (_("%s at operand %d -- `%s'"),
4294 detail->error, idx + 1, str);
4295 }
4296 else
4297 {
4298 gas_assert (idx >= 0);
4299 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4300 aarch64_get_operand_desc (opd_code), str);
4301 }
4302 break;
4303
4304 case AARCH64_OPDE_INVALID_VARIANT:
4305 as_bad (_("operand mismatch -- `%s'"), str);
4306 if (verbose_error_p)
4307 {
4308 /* We will try to correct the erroneous instruction and also provide
4309 more information e.g. all other valid variants.
4310
4311 The string representation of the corrected instruction and other
4312 valid variants are generated by
4313
4314 1) obtaining the intermediate representation of the erroneous
4315 instruction;
4316 2) manipulating the IR, e.g. replacing the operand qualifier;
4317 3) printing out the instruction by calling the printer functions
4318 shared with the disassembler.
4319
4320 The limitation of this method is that the exact input assembly
4321 line cannot be accurately reproduced in some cases, for example an
4322 optional operand present in the actual assembly line will be
4323 omitted in the output; likewise for the optional syntax rules,
4324 e.g. the # before the immediate. Another limitation is that the
4325 assembly symbols and relocation operations in the assembly line
4326 currently cannot be printed out in the error report. Last but not
4327 least, when other errors co-exist with this error, the 'corrected'
4328 instruction may still be incorrect, e.g. given
4329 'ldnp h0,h1,[x0,#6]!'
4330 this diagnosis will provide the version:
4331 'ldnp s0,s1,[x0,#6]!'
4332 which is still not right. */
4333 size_t len = strlen (get_mnemonic_name (str));
4334 int i, qlf_idx;
4335 bfd_boolean result;
4336 char buf[2048];
4337 aarch64_inst *inst_base = &inst.base;
4338 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4339
4340 /* Init inst. */
4341 reset_aarch64_instruction (&inst);
4342 inst_base->opcode = opcode;
4343
4344 /* Reset the error report so that there is no side effect on the
4345 following operand parsing. */
4346 init_operand_error_report ();
4347
4348 /* Fill inst. */
4349 result = parse_operands (str + len, opcode)
4350 && programmer_friendly_fixup (&inst);
4351 gas_assert (result);
4352 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4353 NULL, NULL);
4354 gas_assert (!result);
4355
4356 /* Find the best-matching qualifier sequence. */
4357 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4358 gas_assert (qlf_idx > -1);
4359
4360 /* Assign the qualifiers. */
4361 assign_qualifier_sequence (inst_base,
4362 opcode->qualifiers_list[qlf_idx]);
4363
4364 /* Print the hint. */
4365 output_info (_(" did you mean this?"));
4366 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4367 print_operands (buf, opcode, inst_base->operands);
4368 output_info (_(" %s"), buf);
4369
4370 /* Print out other variant(s) if there are any. */
4371 if (qlf_idx != 0
4372 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4373 output_info (_(" other valid variant(s):"));
4374
4375 /* For each pattern. */
4376 qualifiers_list = opcode->qualifiers_list;
4377 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4378 {
4379 /* Most opcodes have far fewer patterns in the list.
4380 The first NIL qualifier indicates the end of the list. */
4381 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4382 break;
4383
4384 if (i != qlf_idx)
4385 {
4386 /* Mnemonics name. */
4387 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4388
4389 /* Assign the qualifiers. */
4390 assign_qualifier_sequence (inst_base, *qualifiers_list);
4391
4392 /* Print instruction. */
4393 print_operands (buf, opcode, inst_base->operands);
4394
4395 output_info (_(" %s"), buf);
4396 }
4397 }
4398 }
4399 break;
4400
4401 case AARCH64_OPDE_OUT_OF_RANGE:
4402 if (detail->data[0] != detail->data[1])
4403 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4404 detail->error ? detail->error : _("immediate value"),
4405 detail->data[0], detail->data[1], idx + 1, str);
4406 else
4407 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4408 detail->error ? detail->error : _("immediate value"),
4409 detail->data[0], idx + 1, str);
4410 break;
4411
4412 case AARCH64_OPDE_REG_LIST:
4413 if (detail->data[0] == 1)
4414 as_bad (_("invalid number of registers in the list; "
4415 "only 1 register is expected at operand %d -- `%s'"),
4416 idx + 1, str);
4417 else
4418 as_bad (_("invalid number of registers in the list; "
4419 "%d registers are expected at operand %d -- `%s'"),
4420 detail->data[0], idx + 1, str);
4421 break;
4422
4423 case AARCH64_OPDE_UNALIGNED:
4424 as_bad (_("immediate value should be a multiple of "
4425 "%d at operand %d -- `%s'"),
4426 detail->data[0], idx + 1, str);
4427 break;
4428
4429 default:
4430 gas_assert (0);
4431 break;
4432 }
4433 }
4434
4435 /* Process and output the error message about the operand mismatching.
4436
4437 When this function is called, the operand error information has
4438 been collected for an assembly line and there will be multiple
4439 errors in the case of multiple instruction templates; output the
4440 error message that most closely describes the problem. */
4441
4442 static void
4443 output_operand_error_report (char *str)
4444 {
4445 int largest_error_pos;
4446 const char *msg = NULL;
4447 enum aarch64_operand_error_kind kind;
4448 operand_error_record *curr;
4449 operand_error_record *head = operand_error_report.head;
4450 operand_error_record *record = NULL;
4451
4452 /* No error to report. */
4453 if (head == NULL)
4454 return;
4455
4456 gas_assert (head != NULL && operand_error_report.tail != NULL);
4457
4458 /* Only one error. */
4459 if (head == operand_error_report.tail)
4460 {
4461 DEBUG_TRACE ("single opcode entry with error kind: %s",
4462 operand_mismatch_kind_names[head->detail.kind]);
4463 output_operand_error_record (head, str);
4464 return;
4465 }
4466
4467 /* Find the error kind of the highest severity. */
4468 DEBUG_TRACE ("multiple opcode entres with error kind");
4469 kind = AARCH64_OPDE_NIL;
4470 for (curr = head; curr != NULL; curr = curr->next)
4471 {
4472 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4473 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4474 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4475 kind = curr->detail.kind;
4476 }
4477 gas_assert (kind != AARCH64_OPDE_NIL);
4478
4479 /* Pick one of the errors of KIND to report. */
4480 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4481 for (curr = head; curr != NULL; curr = curr->next)
4482 {
4483 if (curr->detail.kind != kind)
4484 continue;
4485 /* If there are multiple errors, pick the one with the highest
4486 mismatching operand index. In the case of multiple errors with
4487 equally high operand indexes, pick the first one, or the first
4488 one with a non-NULL error message. */
4489 if (curr->detail.index > largest_error_pos
4490 || (curr->detail.index == largest_error_pos && msg == NULL
4491 && curr->detail.error != NULL))
4492 {
4493 largest_error_pos = curr->detail.index;
4494 record = curr;
4495 msg = record->detail.error;
4496 }
4497 }
4498
4499 gas_assert (largest_error_pos != -2 && record != NULL);
4500 DEBUG_TRACE ("Pick up error kind %s to report",
4501 operand_mismatch_kind_names[record->detail.kind]);
4502
4503 /* Output. */
4504 output_operand_error_record (record, str);
4505 }
4506 \f
4507 /* Write an AARCH64 instruction to buf - always little-endian. */
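/* For example, the NOP encoding 0xd503201f is written as the byte
   sequence 1f 20 03 d5, regardless of host endianness.  */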
4508 static void
4509 put_aarch64_insn (char *buf, uint32_t insn)
4510 {
4511 unsigned char *where = (unsigned char *) buf;
4512 where[0] = insn;
4513 where[1] = insn >> 8;
4514 where[2] = insn >> 16;
4515 where[3] = insn >> 24;
4516 }
4517
4518 static uint32_t
4519 get_aarch64_insn (char *buf)
4520 {
4521 unsigned char *where = (unsigned char *) buf;
4522 uint32_t result;
4523 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4524 return result;
4525 }
4526
4527 static void
4528 output_inst (struct aarch64_inst *new_inst)
4529 {
4530 char *to = NULL;
4531
4532 to = frag_more (INSN_SIZE);
4533
4534 frag_now->tc_frag_data.recorded = 1;
4535
4536 put_aarch64_insn (to, inst.base.value);
4537
4538 if (inst.reloc.type != BFD_RELOC_UNUSED)
4539 {
4540 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4541 INSN_SIZE, &inst.reloc.exp,
4542 inst.reloc.pc_rel,
4543 inst.reloc.type);
4544 DEBUG_TRACE ("Prepared relocation fix up");
4545 /* Don't check the addend value against the instruction size,
4546 that's the job of our code in md_apply_fix(). */
4547 fixp->fx_no_overflow = 1;
4548 if (new_inst != NULL)
4549 fixp->tc_fix_data.inst = new_inst;
4550 if (aarch64_gas_internal_fixup_p ())
4551 {
4552 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4553 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4554 fixp->fx_addnumber = inst.reloc.flags;
4555 }
4556 }
4557
4558 dwarf2_emit_insn (INSN_SIZE);
4559 }
4560
4561 /* Link together opcodes of the same name. */
4562
4563 struct templates
4564 {
4565 aarch64_opcode *opcode;
4566 struct templates *next;
4567 };
4568
4569 typedef struct templates templates;
4570
4571 static templates *
4572 lookup_mnemonic (const char *start, int len)
4573 {
4574 templates *templ = NULL;
4575
4576 templ = hash_find_n (aarch64_ops_hsh, start, len);
4577 return templ;
4578 }
4579
4580 /* Subroutine of md_assemble, responsible for looking up the primary
4581 opcode from the mnemonic the user wrote. STR points to the
4582 beginning of the mnemonic. */
4583
4584 static templates *
4585 opcode_lookup (char **str)
4586 {
4587 char *end, *base;
4588 const aarch64_cond *cond;
4589 char condname[16];
4590 int len;
4591
4592 /* Scan up to the end of the mnemonic, which must end in white space,
4593 '.', or end of string. */
4594 for (base = end = *str; is_part_of_name (*end); end++)
4595 if (*end == '.')
4596 break;
4597
4598 if (end == base)
4599 return 0;
4600
4601 inst.cond = COND_ALWAYS;
4602
4603 /* Handle a possible condition. */
4604 if (end[0] == '.')
4605 {
4606 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4607 if (cond)
4608 {
4609 inst.cond = cond->value;
4610 *str = end + 3;
4611 }
4612 else
4613 {
4614 *str = end;
4615 return 0;
4616 }
4617 }
4618 else
4619 *str = end;
4620
4621 len = end - base;
4622
4623 if (inst.cond == COND_ALWAYS)
4624 {
4625 /* Look for unaffixed mnemonic. */
4626 return lookup_mnemonic (base, len);
4627 }
4628 else if (len <= 13)
4629 {
4630 /* Append ".c" to the mnemonic if conditional. */
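/* For example, "b.eq" is looked up here as "b.c", with the parsed
   condition kept in inst.cond; this assumes the opcode table stores
   the conditional form under the ".c"-suffixed name.  */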
4631 memcpy (condname, base, len);
4632 memcpy (condname + len, ".c", 2);
4633 base = condname;
4634 len += 2;
4635 return lookup_mnemonic (base, len);
4636 }
4637
4638 return NULL;
4639 }
4640
4641 /* Internal helper routine converting a vector_type_el structure *VECTYPE
4642 to a corresponding operand qualifier. */
4643
4644 static inline aarch64_opnd_qualifier_t
4645 vectype_to_qualifier (const struct vector_type_el *vectype)
4646 {
4647 /* Element size in bytes indexed by vector_el_type. */
4648 const unsigned char ele_size[5]
4649 = {1, 2, 4, 8, 16};
4650 const unsigned int ele_base [5] =
4651 {
4652 AARCH64_OPND_QLF_V_8B,
4653 AARCH64_OPND_QLF_V_2H,
4654 AARCH64_OPND_QLF_V_2S,
4655 AARCH64_OPND_QLF_V_1D,
4656 AARCH64_OPND_QLF_V_1Q
4657 };
4658
4659 if (!vectype->defined || vectype->type == NT_invtype)
4660 goto vectype_conversion_fail;
4661
4662 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4663
4664 if (vectype->defined & NTA_HASINDEX)
4665 /* Vector element register. */
4666 return AARCH64_OPND_QLF_S_B + vectype->type;
4667 else
4668 {
4669 /* Vector register. */
4670 int reg_size = ele_size[vectype->type] * vectype->width;
4671 unsigned offset;
4672 unsigned shift;
4673 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
4674 goto vectype_conversion_fail;
4675
4676 /* The conversion is done by calculating the offset from the base operand
4677 qualifier for the vector type. The operand qualifiers are regular
4678 enough that the offset can be established by shifting the vector width
4679 by a vector-type dependent amount. */
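/* Worked example (assuming, as the arithmetic below does, that the
   qualifiers for each element type are laid out consecutively by
   width): for ".4s" the type is NT_s and the width is 4, so reg_size
   is 4 * 4 = 16, the shift is 2 and the result is
   AARCH64_OPND_QLF_V_2S + (4 >> 2), i.e. the 4S arrangement.  */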
4680 shift = 0;
4681 if (vectype->type == NT_b)
4682 shift = 4;
4683 else if (vectype->type == NT_h || vectype->type == NT_s)
4684 shift = 2;
4685 else if (vectype->type >= NT_d)
4686 shift = 1;
4687 else
4688 gas_assert (0);
4689
4690 offset = ele_base [vectype->type] + (vectype->width >> shift);
4691 gas_assert (AARCH64_OPND_QLF_V_8B <= offset
4692 && offset <= AARCH64_OPND_QLF_V_1Q);
4693 return offset;
4694 }
4695
4696 vectype_conversion_fail:
4697 first_error (_("bad vector arrangement type"));
4698 return AARCH64_OPND_QLF_NIL;
4699 }
4700
4701 /* Process an optional operand that has been omitted from the assembly line.
4702 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4703 instruction's opcode entry while IDX is the index of this omitted operand.
4704 */
4705
4706 static void
4707 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4708 int idx, aarch64_opnd_info *operand)
4709 {
4710 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4711 gas_assert (optional_operand_p (opcode, idx));
4712 gas_assert (!operand->present);
4713
4714 switch (type)
4715 {
4716 case AARCH64_OPND_Rd:
4717 case AARCH64_OPND_Rn:
4718 case AARCH64_OPND_Rm:
4719 case AARCH64_OPND_Rt:
4720 case AARCH64_OPND_Rt2:
4721 case AARCH64_OPND_Rs:
4722 case AARCH64_OPND_Ra:
4723 case AARCH64_OPND_Rt_SYS:
4724 case AARCH64_OPND_Rd_SP:
4725 case AARCH64_OPND_Rn_SP:
4726 case AARCH64_OPND_Fd:
4727 case AARCH64_OPND_Fn:
4728 case AARCH64_OPND_Fm:
4729 case AARCH64_OPND_Fa:
4730 case AARCH64_OPND_Ft:
4731 case AARCH64_OPND_Ft2:
4732 case AARCH64_OPND_Sd:
4733 case AARCH64_OPND_Sn:
4734 case AARCH64_OPND_Sm:
4735 case AARCH64_OPND_Vd:
4736 case AARCH64_OPND_Vn:
4737 case AARCH64_OPND_Vm:
4738 case AARCH64_OPND_VdD1:
4739 case AARCH64_OPND_VnD1:
4740 operand->reg.regno = default_value;
4741 break;
4742
4743 case AARCH64_OPND_Ed:
4744 case AARCH64_OPND_En:
4745 case AARCH64_OPND_Em:
4746 operand->reglane.regno = default_value;
4747 break;
4748
4749 case AARCH64_OPND_IDX:
4750 case AARCH64_OPND_BIT_NUM:
4751 case AARCH64_OPND_IMMR:
4752 case AARCH64_OPND_IMMS:
4753 case AARCH64_OPND_SHLL_IMM:
4754 case AARCH64_OPND_IMM_VLSL:
4755 case AARCH64_OPND_IMM_VLSR:
4756 case AARCH64_OPND_CCMP_IMM:
4757 case AARCH64_OPND_FBITS:
4758 case AARCH64_OPND_UIMM4:
4759 case AARCH64_OPND_UIMM3_OP1:
4760 case AARCH64_OPND_UIMM3_OP2:
4761 case AARCH64_OPND_IMM:
4762 case AARCH64_OPND_WIDTH:
4763 case AARCH64_OPND_UIMM7:
4764 case AARCH64_OPND_NZCV:
4765 operand->imm.value = default_value;
4766 break;
4767
4768 case AARCH64_OPND_EXCEPTION:
4769 inst.reloc.type = BFD_RELOC_UNUSED;
4770 break;
4771
4772 case AARCH64_OPND_BARRIER_ISB:
4773 operand->barrier = aarch64_barrier_options + default_value;
4774
4775 default:
4776 break;
4777 }
4778 }
4779
4780 /* Process the relocation type for move wide instructions.
4781 Return TRUE on success; otherwise return FALSE. */
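/* For example, "movz x0, #:abs_g1:sym" parses to
   BFD_RELOC_AARCH64_MOVW_G1 and therefore records an implicit shift of
   16 in operand 1; the G2/G3 groups imply shifts of 32 and 48 and are
   rejected for 32-bit (Wn) destinations.  (Illustrative only.)  */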
4782
4783 static bfd_boolean
4784 process_movw_reloc_info (void)
4785 {
4786 int is32;
4787 unsigned shift;
4788
4789 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4790
4791 if (inst.base.opcode->op == OP_MOVK)
4792 switch (inst.reloc.type)
4793 {
4794 case BFD_RELOC_AARCH64_MOVW_G0_S:
4795 case BFD_RELOC_AARCH64_MOVW_G1_S:
4796 case BFD_RELOC_AARCH64_MOVW_G2_S:
4797 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4798 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4799 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4800 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4801 set_syntax_error
4802 (_("the specified relocation type is not allowed for MOVK"));
4803 return FALSE;
4804 default:
4805 break;
4806 }
4807
4808 switch (inst.reloc.type)
4809 {
4810 case BFD_RELOC_AARCH64_MOVW_G0:
4811 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4812 case BFD_RELOC_AARCH64_MOVW_G0_S:
4813 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4814 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4815 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4816 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
4817 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
4818 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
4819 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4820 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4821 shift = 0;
4822 break;
4823 case BFD_RELOC_AARCH64_MOVW_G1:
4824 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4825 case BFD_RELOC_AARCH64_MOVW_G1_S:
4826 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4827 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4828 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4829 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
4830 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
4831 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
4832 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4833 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4834 shift = 16;
4835 break;
4836 case BFD_RELOC_AARCH64_MOVW_G2:
4837 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4838 case BFD_RELOC_AARCH64_MOVW_G2_S:
4839 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
4840 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4841 if (is32)
4842 {
4843 set_fatal_syntax_error
4844 (_("the specified relocation type is not allowed for 32-bit "
4845 "register"));
4846 return FALSE;
4847 }
4848 shift = 32;
4849 break;
4850 case BFD_RELOC_AARCH64_MOVW_G3:
4851 if (is32)
4852 {
4853 set_fatal_syntax_error
4854 (_("the specified relocation type is not allowed for 32-bit "
4855 "register"));
4856 return FALSE;
4857 }
4858 shift = 48;
4859 break;
4860 default:
4861 /* More cases should be added when more MOVW-related relocation types
4862 are supported in GAS. */
4863 gas_assert (aarch64_gas_internal_fixup_p ());
4864 /* The shift amount should have already been set by the parser. */
4865 return TRUE;
4866 }
4867 inst.base.operands[1].shifter.amount = shift;
4868 return TRUE;
4869 }
4870
4871 /* A primitive log calculator. */
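/* For example, get_logsz (1) == 0, get_logsz (8) == 3 and
   get_logsz (16) == 4; sizes up to 16 that are not powers of two
   trigger the assertion below.  */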
4872
4873 static inline unsigned int
4874 get_logsz (unsigned int size)
4875 {
4876 const unsigned char ls[16] =
4877 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4878 if (size > 16)
4879 {
4880 gas_assert (0);
4881 return -1;
4882 }
4883 gas_assert (ls[size - 1] != (unsigned char)-1);
4884 return ls[size - 1];
4885 }
4886
4887 /* Determine and return the real reloc type code for an instruction
4888 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4889
4890 static inline bfd_reloc_code_real_type
4891 ldst_lo12_determine_real_reloc_type (void)
4892 {
4893 unsigned logsz;
4894 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4895 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4896
4897 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
4898 {
4899 BFD_RELOC_AARCH64_LDST8_LO12,
4900 BFD_RELOC_AARCH64_LDST16_LO12,
4901 BFD_RELOC_AARCH64_LDST32_LO12,
4902 BFD_RELOC_AARCH64_LDST64_LO12,
4903 BFD_RELOC_AARCH64_LDST128_LO12
4904 },
4905 {
4906 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
4907 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
4908 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
4909 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
4910 BFD_RELOC_AARCH64_NONE
4911 },
4912 {
4913 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
4914 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
4915 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
4916 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
4917 BFD_RELOC_AARCH64_NONE
4918 }
4919 };
4920
4921 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
4922 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4923 || (inst.reloc.type
4924 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
4925 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4926
4927 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4928 opd1_qlf =
4929 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4930 1, opd0_qlf, 0);
4931 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4932
4933 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4934 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
4935 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
4936 gas_assert (logsz <= 3);
4937 else
4938 gas_assert (logsz <= 4);
4939
4940 /* In reloc.c, these pseudo relocation types should be defined in the same
4941 order as the reloc_ldst_lo12 array above, because the array index
4942 calculation below relies on this. */
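/* For example, for "ldr x0, [x1, #:lo12:sym]" the transfer size is 8
   bytes, so logsz is 3 and the plain LDST_LO12 row above yields
   BFD_RELOC_AARCH64_LDST64_LO12.  (Illustrative only.)  */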
4943 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
4944 }
4945
4946 /* Check whether a register list REGINFO is valid. The registers must be
4947 numbered in increasing order (modulo 32), in increments of one or two.
4948
4949 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4950 increments of two.
4951
4952 Return FALSE if such a register list is invalid, otherwise return TRUE. */
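/* REGINFO is assumed to be packed as produced by the register-list
   parser: bits [1:0] hold the number of registers minus one, and each
   successive 5-bit field above them holds a register number, first
   register in the lowest field.  For example, the list { v31, v0 }
   would be encoded as (0 << 7) | (31 << 2) | 1 and is accepted below
   because (31 + 1) & 0x1f == 0.  */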
4953
4954 static bfd_boolean
4955 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4956 {
4957 uint32_t i, nb_regs, prev_regno, incr;
4958
4959 nb_regs = 1 + (reginfo & 0x3);
4960 reginfo >>= 2;
4961 prev_regno = reginfo & 0x1f;
4962 incr = accept_alternate ? 2 : 1;
4963
4964 for (i = 1; i < nb_regs; ++i)
4965 {
4966 uint32_t curr_regno;
4967 reginfo >>= 5;
4968 curr_regno = reginfo & 0x1f;
4969 if (curr_regno != ((prev_regno + incr) & 0x1f))
4970 return FALSE;
4971 prev_regno = curr_regno;
4972 }
4973
4974 return TRUE;
4975 }
4976
4977 /* Generic instruction operand parser. This does no encoding and no
4978 semantic validation; it merely squirrels values away in the inst
4979 structure. Returns TRUE or FALSE depending on whether the
4980 specified grammar matched. */
4981
4982 static bfd_boolean
4983 parse_operands (char *str, const aarch64_opcode *opcode)
4984 {
4985 int i;
4986 char *backtrack_pos = 0;
4987 const enum aarch64_opnd *operands = opcode->operands;
4988 aarch64_reg_type imm_reg_type;
4989
4990 clear_error ();
4991 skip_whitespace (str);
4992
4993 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
4994
4995 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4996 {
4997 int64_t val;
4998 int isreg32, isregzero;
4999 int comma_skipped_p = 0;
5000 aarch64_reg_type rtype;
5001 struct vector_type_el vectype;
5002 aarch64_opnd_info *info = &inst.base.operands[i];
5003
5004 DEBUG_TRACE ("parse operand %d", i);
5005
5006 /* Assign the operand code. */
5007 info->type = operands[i];
5008
5009 if (optional_operand_p (opcode, i))
5010 {
5011 /* Remember where we are in case we need to backtrack. */
5012 gas_assert (!backtrack_pos);
5013 backtrack_pos = str;
5014 }
5015
5016 /* Expect a comma between operands; the backtrack mechanism will take
5017 care of cases of an omitted optional operand. */
5018 if (i > 0 && ! skip_past_char (&str, ','))
5019 {
5020 set_syntax_error (_("comma expected between operands"));
5021 goto failure;
5022 }
5023 else
5024 comma_skipped_p = 1;
5025
5026 switch (operands[i])
5027 {
5028 case AARCH64_OPND_Rd:
5029 case AARCH64_OPND_Rn:
5030 case AARCH64_OPND_Rm:
5031 case AARCH64_OPND_Rt:
5032 case AARCH64_OPND_Rt2:
5033 case AARCH64_OPND_Rs:
5034 case AARCH64_OPND_Ra:
5035 case AARCH64_OPND_Rt_SYS:
5036 case AARCH64_OPND_PAIRREG:
5037 po_int_reg_or_fail (1, 0);
5038 break;
5039
5040 case AARCH64_OPND_Rd_SP:
5041 case AARCH64_OPND_Rn_SP:
5042 po_int_reg_or_fail (0, 1);
5043 break;
5044
5045 case AARCH64_OPND_Rm_EXT:
5046 case AARCH64_OPND_Rm_SFT:
5047 po_misc_or_fail (parse_shifter_operand
5048 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5049 ? SHIFTED_ARITH_IMM
5050 : SHIFTED_LOGIC_IMM)));
5051 if (!info->shifter.operator_present)
5052 {
5053 /* Default to LSL if not present. Libopcodes prefers shifter
5054 kind to be explicit. */
5055 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5056 info->shifter.kind = AARCH64_MOD_LSL;
5057 /* For Rm_EXT, libopcodes will carry out further check on whether
5058 or not stack pointer is used in the instruction (Recall that
5059 "the extend operator is not optional unless at least one of
5060 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5061 }
5062 break;
5063
5064 case AARCH64_OPND_Fd:
5065 case AARCH64_OPND_Fn:
5066 case AARCH64_OPND_Fm:
5067 case AARCH64_OPND_Fa:
5068 case AARCH64_OPND_Ft:
5069 case AARCH64_OPND_Ft2:
5070 case AARCH64_OPND_Sd:
5071 case AARCH64_OPND_Sn:
5072 case AARCH64_OPND_Sm:
5073 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5074 if (val == PARSE_FAIL)
5075 {
5076 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5077 goto failure;
5078 }
5079 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5080
5081 info->reg.regno = val;
5082 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5083 break;
5084
5085 case AARCH64_OPND_Vd:
5086 case AARCH64_OPND_Vn:
5087 case AARCH64_OPND_Vm:
5088 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5089 if (val == PARSE_FAIL)
5090 {
5091 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5092 goto failure;
5093 }
5094 if (vectype.defined & NTA_HASINDEX)
5095 goto failure;
5096
5097 info->reg.regno = val;
5098 info->qualifier = vectype_to_qualifier (&vectype);
5099 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5100 goto failure;
5101 break;
5102
5103 case AARCH64_OPND_VdD1:
5104 case AARCH64_OPND_VnD1:
5105 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5106 if (val == PARSE_FAIL)
5107 {
5108 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5109 goto failure;
5110 }
5111 if (vectype.type != NT_d || vectype.index != 1)
5112 {
5113 set_fatal_syntax_error
5114 (_("the top half of a 128-bit FP/SIMD register is expected"));
5115 goto failure;
5116 }
5117 info->reg.regno = val;
5118 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5119 here; it is correct for the purpose of encoding/decoding since
5120 only the register number is explicitly encoded in the related
5121 instructions, although this appears a bit hacky. */
5122 info->qualifier = AARCH64_OPND_QLF_S_D;
5123 break;
5124
5125 case AARCH64_OPND_Ed:
5126 case AARCH64_OPND_En:
5127 case AARCH64_OPND_Em:
5128 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5129 if (val == PARSE_FAIL)
5130 {
5131 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5132 goto failure;
5133 }
5134 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5135 goto failure;
5136
5137 info->reglane.regno = val;
5138 info->reglane.index = vectype.index;
5139 info->qualifier = vectype_to_qualifier (&vectype);
5140 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5141 goto failure;
5142 break;
5143
5144 case AARCH64_OPND_LVn:
5145 case AARCH64_OPND_LVt:
5146 case AARCH64_OPND_LVt_AL:
5147 case AARCH64_OPND_LEt:
5148 if ((val = parse_vector_reg_list (&str, REG_TYPE_VN,
5149 &vectype)) == PARSE_FAIL)
5150 goto failure;
5151 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5152 {
5153 set_fatal_syntax_error (_("invalid register list"));
5154 goto failure;
5155 }
5156 info->reglist.first_regno = (val >> 2) & 0x1f;
5157 info->reglist.num_regs = (val & 0x3) + 1;
5158 if (operands[i] == AARCH64_OPND_LEt)
5159 {
5160 if (!(vectype.defined & NTA_HASINDEX))
5161 goto failure;
5162 info->reglist.has_index = 1;
5163 info->reglist.index = vectype.index;
5164 }
5165 else if (!(vectype.defined & NTA_HASTYPE))
5166 goto failure;
5167 info->qualifier = vectype_to_qualifier (&vectype);
5168 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5169 goto failure;
5170 break;
5171
5172 case AARCH64_OPND_Cn:
5173 case AARCH64_OPND_Cm:
5174 po_reg_or_fail (REG_TYPE_CN);
5175 if (val > 15)
5176 {
5177 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5178 goto failure;
5179 }
5180 inst.base.operands[i].reg.regno = val;
5181 break;
5182
5183 case AARCH64_OPND_SHLL_IMM:
5184 case AARCH64_OPND_IMM_VLSR:
5185 po_imm_or_fail (1, 64);
5186 info->imm.value = val;
5187 break;
5188
5189 case AARCH64_OPND_CCMP_IMM:
5190 case AARCH64_OPND_FBITS:
5191 case AARCH64_OPND_UIMM4:
5192 case AARCH64_OPND_UIMM3_OP1:
5193 case AARCH64_OPND_UIMM3_OP2:
5194 case AARCH64_OPND_IMM_VLSL:
5195 case AARCH64_OPND_IMM:
5196 case AARCH64_OPND_WIDTH:
5197 po_imm_nc_or_fail ();
5198 info->imm.value = val;
5199 break;
5200
5201 case AARCH64_OPND_UIMM7:
5202 po_imm_or_fail (0, 127);
5203 info->imm.value = val;
5204 break;
5205
5206 case AARCH64_OPND_IDX:
5207 case AARCH64_OPND_BIT_NUM:
5208 case AARCH64_OPND_IMMR:
5209 case AARCH64_OPND_IMMS:
5210 po_imm_or_fail (0, 63);
5211 info->imm.value = val;
5212 break;
5213
5214 case AARCH64_OPND_IMM0:
5215 po_imm_nc_or_fail ();
5216 if (val != 0)
5217 {
5218 set_fatal_syntax_error (_("immediate zero expected"));
5219 goto failure;
5220 }
5221 info->imm.value = 0;
5222 break;
5223
5224 case AARCH64_OPND_FPIMM0:
5225 {
5226 int qfloat;
5227 bfd_boolean res1 = FALSE, res2 = FALSE;
5228 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5229 it is probably not worth the effort to support it. */
5230 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5231 imm_reg_type))
5232 && (error_p ()
5233 || !(res2 = parse_constant_immediate (&str, &val,
5234 imm_reg_type))))
5235 goto failure;
5236 if ((res1 && qfloat == 0) || (res2 && val == 0))
5237 {
5238 info->imm.value = 0;
5239 info->imm.is_fp = 1;
5240 break;
5241 }
5242 set_fatal_syntax_error (_("immediate zero expected"));
5243 goto failure;
5244 }
5245
5246 case AARCH64_OPND_IMM_MOV:
5247 {
5248 char *saved = str;
5249 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5250 reg_name_p (str, REG_TYPE_VN))
5251 goto failure;
5252 str = saved;
5253 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5254 GE_OPT_PREFIX, 1));
5255 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5256 later. fix_mov_imm_insn will try to determine a machine
5257 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5258 message if the immediate cannot be moved by a single
5259 instruction. */
5260 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5261 inst.base.operands[i].skip = 1;
5262 }
5263 break;
5264
5265 case AARCH64_OPND_SIMD_IMM:
5266 case AARCH64_OPND_SIMD_IMM_SFT:
5267 if (! parse_big_immediate (&str, &val, imm_reg_type))
5268 goto failure;
5269 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5270 /* addr_off_p */ 0,
5271 /* need_libopcodes_p */ 1,
5272 /* skip_p */ 1);
5273 /* Parse shift.
5274 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5275 shift, we don't check it here; we leave the checking to
5276 the libopcodes (operand_general_constraint_met_p). By
5277 doing this, we achieve better diagnostics. */
5278 if (skip_past_comma (&str)
5279 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5280 goto failure;
5281 if (!info->shifter.operator_present
5282 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5283 {
5284 /* Default to LSL if not present. Libopcodes prefers shifter
5285 kind to be explicit. */
5286 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5287 info->shifter.kind = AARCH64_MOD_LSL;
5288 }
5289 break;
5290
5291 case AARCH64_OPND_FPIMM:
5292 case AARCH64_OPND_SIMD_FPIMM:
5293 {
5294 int qfloat;
5295 bfd_boolean dp_p
5296 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5297 == 8);
5298 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5299 || qfloat == 0)
5300 {
5301 if (!error_p ())
5302 set_fatal_syntax_error (_("invalid floating-point"
5303 " constant"));
5304 goto failure;
5305 }
5306 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5307 inst.base.operands[i].imm.is_fp = 1;
5308 }
5309 break;
5310
5311 case AARCH64_OPND_LIMM:
5312 po_misc_or_fail (parse_shifter_operand (&str, info,
5313 SHIFTED_LOGIC_IMM));
5314 if (info->shifter.operator_present)
5315 {
5316 set_fatal_syntax_error
5317 (_("shift not allowed for bitmask immediate"));
5318 goto failure;
5319 }
5320 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5321 /* addr_off_p */ 0,
5322 /* need_libopcodes_p */ 1,
5323 /* skip_p */ 1);
5324 break;
5325
5326 case AARCH64_OPND_AIMM:
5327 if (opcode->op == OP_ADD)
5328 /* ADD may have relocation types. */
5329 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5330 SHIFTED_ARITH_IMM));
5331 else
5332 po_misc_or_fail (parse_shifter_operand (&str, info,
5333 SHIFTED_ARITH_IMM));
5334 switch (inst.reloc.type)
5335 {
5336 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5337 info->shifter.amount = 12;
5338 break;
5339 case BFD_RELOC_UNUSED:
5340 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5341 if (info->shifter.kind != AARCH64_MOD_NONE)
5342 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5343 inst.reloc.pc_rel = 0;
5344 break;
5345 default:
5346 break;
5347 }
5348 info->imm.value = 0;
5349 if (!info->shifter.operator_present)
5350 {
5351 /* Default to LSL if not present. Libopcodes prefers shifter
5352 kind to be explicit. */
5353 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5354 info->shifter.kind = AARCH64_MOD_LSL;
5355 }
5356 break;
5357
5358 case AARCH64_OPND_HALF:
5359 {
5360 /* #<imm16> or relocation. */
5361 int internal_fixup_p;
5362 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5363 if (internal_fixup_p)
5364 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5365 skip_whitespace (str);
5366 if (skip_past_comma (&str))
5367 {
5368 /* {, LSL #<shift>} */
5369 if (! aarch64_gas_internal_fixup_p ())
5370 {
5371 set_fatal_syntax_error (_("can't mix relocation modifier "
5372 "with explicit shift"));
5373 goto failure;
5374 }
5375 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5376 }
5377 else
5378 inst.base.operands[i].shifter.amount = 0;
5379 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5380 inst.base.operands[i].imm.value = 0;
5381 if (! process_movw_reloc_info ())
5382 goto failure;
5383 }
5384 break;
5385
5386 case AARCH64_OPND_EXCEPTION:
5387 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5388 imm_reg_type));
5389 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5390 /* addr_off_p */ 0,
5391 /* need_libopcodes_p */ 0,
5392 /* skip_p */ 1);
5393 break;
5394
5395 case AARCH64_OPND_NZCV:
5396 {
5397 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5398 if (nzcv != NULL)
5399 {
5400 str += 4;
5401 info->imm.value = nzcv->value;
5402 break;
5403 }
5404 po_imm_or_fail (0, 15);
5405 info->imm.value = val;
5406 }
5407 break;
5408
5409 case AARCH64_OPND_COND:
5410 case AARCH64_OPND_COND1:
5411 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5412 str += 2;
5413 if (info->cond == NULL)
5414 {
5415 set_syntax_error (_("invalid condition"));
5416 goto failure;
5417 }
5418 else if (operands[i] == AARCH64_OPND_COND1
5419 && (info->cond->value & 0xe) == 0xe)
5420 {
5421 /* Do not allow AL or NV. */
5422 set_default_error ();
5423 goto failure;
5424 }
5425 break;
5426
5427 case AARCH64_OPND_ADDR_ADRP:
5428 po_misc_or_fail (parse_adrp (&str));
5429 /* Clear the value, as the operand needs to be relocated. */
5430 info->imm.value = 0;
5431 break;
5432
5433 case AARCH64_OPND_ADDR_PCREL14:
5434 case AARCH64_OPND_ADDR_PCREL19:
5435 case AARCH64_OPND_ADDR_PCREL21:
5436 case AARCH64_OPND_ADDR_PCREL26:
5437 po_misc_or_fail (parse_address_reloc (&str, info));
5438 if (!info->addr.pcrel)
5439 {
5440 set_syntax_error (_("invalid pc-relative address"));
5441 goto failure;
5442 }
5443 if (inst.gen_lit_pool
5444 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5445 {
5446 /* Only permit "=value" in the literal load instructions.
5447 The literal will be generated by programmer_friendly_fixup. */
5448 set_syntax_error (_("invalid use of \"=immediate\""));
5449 goto failure;
5450 }
5451 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5452 {
5453 set_syntax_error (_("unrecognized relocation suffix"));
5454 goto failure;
5455 }
5456 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5457 {
5458 info->imm.value = inst.reloc.exp.X_add_number;
5459 inst.reloc.type = BFD_RELOC_UNUSED;
5460 }
5461 else
5462 {
5463 info->imm.value = 0;
5464 if (inst.reloc.type == BFD_RELOC_UNUSED)
5465 switch (opcode->iclass)
5466 {
5467 case compbranch:
5468 case condbranch:
5469 /* e.g. CBZ or B.COND */
5470 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5471 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5472 break;
5473 case testbranch:
5474 /* e.g. TBZ */
5475 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5476 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5477 break;
5478 case branch_imm:
5479 /* e.g. B or BL */
5480 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5481 inst.reloc.type =
5482 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5483 : BFD_RELOC_AARCH64_JUMP26;
5484 break;
5485 case loadlit:
5486 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5487 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5488 break;
5489 case pcreladdr:
5490 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5491 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5492 break;
5493 default:
5494 gas_assert (0);
5495 abort ();
5496 }
5497 inst.reloc.pc_rel = 1;
5498 }
5499 break;
5500
5501 case AARCH64_OPND_ADDR_SIMPLE:
5502 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5503 /* [<Xn|SP>{, #<simm>}] */
5504 po_char_or_fail ('[');
5505 po_reg_or_fail (REG_TYPE_R64_SP);
5506 /* Accept optional ", #0". */
5507 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5508 && skip_past_char (&str, ','))
5509 {
5510 skip_past_char (&str, '#');
5511 if (! skip_past_char (&str, '0'))
5512 {
5513 set_fatal_syntax_error
5514 (_("the optional immediate offset can only be 0"));
5515 goto failure;
5516 }
5517 }
5518 po_char_or_fail (']');
5519 info->addr.base_regno = val;
5520 break;
5521
5522 case AARCH64_OPND_ADDR_REGOFF:
5523 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5524 po_misc_or_fail (parse_address (&str, info, 0));
5525 if (info->addr.pcrel || !info->addr.offset.is_reg
5526 || !info->addr.preind || info->addr.postind
5527 || info->addr.writeback)
5528 {
5529 set_syntax_error (_("invalid addressing mode"));
5530 goto failure;
5531 }
5532 if (!info->shifter.operator_present)
5533 {
5534 /* Default to LSL if not present. Libopcodes prefers shifter
5535 kind to be explicit. */
5536 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5537 info->shifter.kind = AARCH64_MOD_LSL;
5538 }
5539 /* Qualifier to be deduced by libopcodes. */
5540 break;
5541
5542 case AARCH64_OPND_ADDR_SIMM7:
5543 po_misc_or_fail (parse_address (&str, info, 0));
5544 if (info->addr.pcrel || info->addr.offset.is_reg
5545 || (!info->addr.preind && !info->addr.postind))
5546 {
5547 set_syntax_error (_("invalid addressing mode"));
5548 goto failure;
5549 }
5550 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5551 /* addr_off_p */ 1,
5552 /* need_libopcodes_p */ 1,
5553 /* skip_p */ 0);
5554 break;
5555
5556 case AARCH64_OPND_ADDR_SIMM9:
5557 case AARCH64_OPND_ADDR_SIMM9_2:
5558 po_misc_or_fail (parse_address_reloc (&str, info));
5559 if (info->addr.pcrel || info->addr.offset.is_reg
5560 || (!info->addr.preind && !info->addr.postind)
5561 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5562 && info->addr.writeback))
5563 {
5564 set_syntax_error (_("invalid addressing mode"));
5565 goto failure;
5566 }
5567 if (inst.reloc.type != BFD_RELOC_UNUSED)
5568 {
5569 set_syntax_error (_("relocation not allowed"));
5570 goto failure;
5571 }
5572 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5573 /* addr_off_p */ 1,
5574 /* need_libopcodes_p */ 1,
5575 /* skip_p */ 0);
5576 break;
5577
5578 case AARCH64_OPND_ADDR_UIMM12:
5579 po_misc_or_fail (parse_address_reloc (&str, info));
5580 if (info->addr.pcrel || info->addr.offset.is_reg
5581 || !info->addr.preind || info->addr.writeback)
5582 {
5583 set_syntax_error (_("invalid addressing mode"));
5584 goto failure;
5585 }
5586 if (inst.reloc.type == BFD_RELOC_UNUSED)
5587 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5588 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5589 || (inst.reloc.type
5590 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5591 || (inst.reloc.type
5592 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5593 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5594 /* Leave qualifier to be determined by libopcodes. */
5595 break;
5596
5597 case AARCH64_OPND_SIMD_ADDR_POST:
5598 /* [<Xn|SP>], <Xm|#<amount>> */
5599 po_misc_or_fail (parse_address (&str, info, 1));
5600 if (!info->addr.postind || !info->addr.writeback)
5601 {
5602 set_syntax_error (_("invalid addressing mode"));
5603 goto failure;
5604 }
5605 if (!info->addr.offset.is_reg)
5606 {
5607 if (inst.reloc.exp.X_op == O_constant)
5608 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5609 else
5610 {
5611 set_fatal_syntax_error
5612 (_("writeback value should be an immediate constant"));
5613 goto failure;
5614 }
5615 }
5616 /* No qualifier. */
5617 break;
5618
5619 case AARCH64_OPND_SYSREG:
5620 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5621 == PARSE_FAIL)
5622 {
5623 set_syntax_error (_("unknown or missing system register name"));
5624 goto failure;
5625 }
5626 inst.base.operands[i].sysreg = val;
5627 break;
5628
5629 case AARCH64_OPND_PSTATEFIELD:
5630 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5631 == PARSE_FAIL)
5632 {
5633 set_syntax_error (_("unknown or missing PSTATE field name"));
5634 goto failure;
5635 }
5636 inst.base.operands[i].pstatefield = val;
5637 break;
5638
5639 case AARCH64_OPND_SYSREG_IC:
5640 inst.base.operands[i].sysins_op =
5641 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5642 goto sys_reg_ins;
5643 case AARCH64_OPND_SYSREG_DC:
5644 inst.base.operands[i].sysins_op =
5645 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5646 goto sys_reg_ins;
5647 case AARCH64_OPND_SYSREG_AT:
5648 inst.base.operands[i].sysins_op =
5649 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5650 goto sys_reg_ins;
5651 case AARCH64_OPND_SYSREG_TLBI:
5652 inst.base.operands[i].sysins_op =
5653 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5654 sys_reg_ins:
5655 if (inst.base.operands[i].sysins_op == NULL)
5656 {
5657 set_fatal_syntax_error ( _("unknown or missing operation name"));
5658 goto failure;
5659 }
5660 break;
5661
5662 case AARCH64_OPND_BARRIER:
5663 case AARCH64_OPND_BARRIER_ISB:
5664 val = parse_barrier (&str);
5665 if (val != PARSE_FAIL
5666 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5667 {
5668 /* ISB only accepts the option name 'sy'. */
5669 set_syntax_error
5670 (_("the specified option is not accepted in ISB"));
5671 /* Turn off backtrack as this optional operand is present. */
5672 backtrack_pos = 0;
5673 goto failure;
5674 }
5675 /* This is an extension to accept a 0..15 immediate. */
5676 if (val == PARSE_FAIL)
5677 po_imm_or_fail (0, 15);
5678 info->barrier = aarch64_barrier_options + val;
5679 break;
5680
5681 case AARCH64_OPND_PRFOP:
5682 val = parse_pldop (&str);
5683 /* This is an extension to accept a 0..31 immediate. */
5684 if (val == PARSE_FAIL)
5685 po_imm_or_fail (0, 31);
5686 inst.base.operands[i].prfop = aarch64_prfops + val;
5687 break;
5688
5689 case AARCH64_OPND_BARRIER_PSB:
5690 val = parse_barrier_psb (&str, &(info->hint_option));
5691 if (val == PARSE_FAIL)
5692 goto failure;
5693 break;
5694
5695 default:
5696 as_fatal (_("unhandled operand code %d"), operands[i]);
5697 }
5698
5699 /* If we get here, this operand was successfully parsed. */
5700 inst.base.operands[i].present = 1;
5701 continue;
5702
5703 failure:
5704 /* The parse routine should already have set the error, but in case
5705 not, set a default one here. */
5706 if (! error_p ())
5707 set_default_error ();
5708
5709 if (! backtrack_pos)
5710 goto parse_operands_return;
5711
5712 {
5713 /* We reach here because this operand is marked as optional, and
5714 either no operand was supplied or the operand was supplied but it
5715 was syntactically incorrect. In the latter case we report an
5716 error. In the former case we perform a few more checks before
5717 dropping through to the code to insert the default operand. */
5718
5719 char *tmp = backtrack_pos;
5720 char endchar = END_OF_INSN;
5721
5722 if (i != (aarch64_num_of_operands (opcode) - 1))
5723 endchar = ',';
5724 skip_past_char (&tmp, ',');
5725
5726 if (*tmp != endchar)
5727 /* The user has supplied an operand in the wrong format. */
5728 goto parse_operands_return;
5729
5730 /* Make sure there is not a comma before the optional operand.
5731 For example the fifth operand of 'sys' is optional:
5732
5733 sys #0,c0,c0,#0, <--- wrong
5734 sys #0,c0,c0,#0 <--- correct. */
5735 if (comma_skipped_p && i && endchar == END_OF_INSN)
5736 {
5737 set_fatal_syntax_error
5738 (_("unexpected comma before the omitted optional operand"));
5739 goto parse_operands_return;
5740 }
5741 }
5742
5743 /* Reaching here means we are dealing with an optional operand that is
5744 omitted from the assembly line. */
5745 gas_assert (optional_operand_p (opcode, i));
5746 info->present = 0;
5747 process_omitted_operand (operands[i], opcode, i, info);
5748
5749 /* Try again, skipping the optional operand at backtrack_pos. */
5750 str = backtrack_pos;
5751 backtrack_pos = 0;
5752
5753 /* Clear any error record after the omitted optional operand has been
5754 successfully handled. */
5755 clear_error ();
5756 }
5757
5758 /* Check if we have parsed all the operands. */
5759 if (*str != '\0' && ! error_p ())
5760 {
5761 /* Set I to the index of the last present operand; this is
5762 for the purpose of diagnostics. */
5763 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5764 ;
5765 set_fatal_syntax_error
5766 (_("unexpected characters following instruction"));
5767 }
5768
5769 parse_operands_return:
5770
5771 if (error_p ())
5772 {
5773 DEBUG_TRACE ("parsing FAIL: %s - %s",
5774 operand_mismatch_kind_names[get_error_kind ()],
5775 get_error_message ());
5776 /* Record the operand error properly; this is useful when there
5777 are multiple instruction templates for a mnemonic name, so that
5778 later on, we can select the error that most closely describes
5779 the problem. */
5780 record_operand_error (opcode, i, get_error_kind (),
5781 get_error_message ());
5782 return FALSE;
5783 }
5784 else
5785 {
5786 DEBUG_TRACE ("parsing SUCCESS");
5787 return TRUE;
5788 }
5789 }
5790
5791 /* Perform some fix-ups to provide programmer-friendly features while
5792 keeping libopcodes happy, i.e. libopcodes only accepts
5793 the preferred architectural syntax.
5794 Return FALSE if there is any failure; otherwise return TRUE. */
5795
5796 static bfd_boolean
5797 programmer_friendly_fixup (aarch64_instruction *instr)
5798 {
5799 aarch64_inst *base = &instr->base;
5800 const aarch64_opcode *opcode = base->opcode;
5801 enum aarch64_op op = opcode->op;
5802 aarch64_opnd_info *operands = base->operands;
5803
5804 DEBUG_TRACE ("enter");
5805
5806 switch (opcode->iclass)
5807 {
5808 case testbranch:
5809 /* TBNZ Xn|Wn, #uimm6, label
5810 Test and Branch Not Zero: conditionally jumps to label if bit number
5811 uimm6 in register Xn is not zero. The bit number implies the width of
5812 the register, which may be written and should be disassembled as Wn if
5813 uimm is less than 32. */
5814 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5815 {
5816 if (operands[1].imm.value >= 32)
5817 {
5818 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5819 0, 31);
5820 return FALSE;
5821 }
5822 operands[0].qualifier = AARCH64_OPND_QLF_X;
5823 }
5824 break;
5825 case loadlit:
5826 /* LDR Wt, label | =value
5827 As a convenience, assemblers will typically permit the notation
5828 "=value" in conjunction with the pc-relative literal load instructions
5829 to automatically place an immediate value or symbolic address in a
5830 nearby literal pool and generate a hidden label which references it.
5831 ISREG has been set to 0 in the case of =value. */
5832 if (instr->gen_lit_pool
5833 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5834 {
5835 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5836 if (op == OP_LDRSW_LIT)
5837 size = 4;
5838 if (instr->reloc.exp.X_op != O_constant
5839 && instr->reloc.exp.X_op != O_big
5840 && instr->reloc.exp.X_op != O_symbol)
5841 {
5842 record_operand_error (opcode, 1,
5843 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5844 _("constant expression expected"));
5845 return FALSE;
5846 }
5847 if (! add_to_lit_pool (&instr->reloc.exp, size))
5848 {
5849 record_operand_error (opcode, 1,
5850 AARCH64_OPDE_OTHER_ERROR,
5851 _("literal pool insertion failed"));
5852 return FALSE;
5853 }
5854 }
5855 break;
5856 case log_shift:
5857 case bitfield:
5858 /* UXT[BHW] Wd, Wn
5859 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5860 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5861 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5862 A programmer-friendly assembler should accept a destination Xd in
5863 place of Wd, however that is not the preferred form for disassembly.
5864 */
5865 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5866 && operands[1].qualifier == AARCH64_OPND_QLF_W
5867 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5868 operands[0].qualifier = AARCH64_OPND_QLF_W;
5869 break;
5870
5871 case addsub_ext:
5872 {
5873 /* In the 64-bit form, the final register operand is written as Wm
5874 for all but the (possibly omitted) UXTX/LSL and SXTX
5875 operators.
5876 As a programmer-friendly assembler, we accept e.g.
5877 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5878 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5879 int idx = aarch64_operand_index (opcode->operands,
5880 AARCH64_OPND_Rm_EXT);
5881 gas_assert (idx == 1 || idx == 2);
5882 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5883 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5884 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5885 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5886 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5887 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5888 }
5889 break;
5890
5891 default:
5892 break;
5893 }
5894
5895 DEBUG_TRACE ("exit with SUCCESS");
5896 return TRUE;
5897 }
5898
5899 /* Check for loads and stores that will cause unpredictable behavior. */
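/* For example, "ldr x0, [x0], #8" writes back to its own transfer
   register, and "ldp x0, x0, [x1]" loads the same register twice;
   both cases are diagnosed below.  */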
5900
5901 static void
5902 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5903 {
5904 aarch64_inst *base = &instr->base;
5905 const aarch64_opcode *opcode = base->opcode;
5906 const aarch64_opnd_info *opnds = base->operands;
5907 switch (opcode->iclass)
5908 {
5909 case ldst_pos:
5910 case ldst_imm9:
5911 case ldst_unscaled:
5912 case ldst_unpriv:
5913 /* Loading/storing the base register is unpredictable if writeback. */
5914 if ((aarch64_get_operand_class (opnds[0].type)
5915 == AARCH64_OPND_CLASS_INT_REG)
5916 && opnds[0].reg.regno == opnds[1].addr.base_regno
5917 && opnds[1].addr.base_regno != REG_SP
5918 && opnds[1].addr.writeback)
5919 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5920 break;
5921 case ldstpair_off:
5922 case ldstnapair_offs:
5923 case ldstpair_indexed:
5924 /* Loading/storing the base register is unpredictable if writeback. */
5925 if ((aarch64_get_operand_class (opnds[0].type)
5926 == AARCH64_OPND_CLASS_INT_REG)
5927 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5928 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5929 && opnds[2].addr.base_regno != REG_SP
5930 && opnds[2].addr.writeback)
5931 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5932 /* Load operations must load different registers. */
5933 if ((opcode->opcode & (1 << 22))
5934 && opnds[0].reg.regno == opnds[1].reg.regno)
5935 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5936 break;
5937 default:
5938 break;
5939 }
5940 }
5941
5942 /* A wrapper function to interface with libopcodes on encoding and
5943 record the error message if there is any.
5944
5945 Return TRUE on success; otherwise return FALSE. */
5946
5947 static bfd_boolean
5948 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5949 aarch64_insn *code)
5950 {
5951 aarch64_operand_error error_info;
5952 error_info.kind = AARCH64_OPDE_NIL;
5953 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5954 return TRUE;
5955 else
5956 {
5957 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5958 record_operand_error_info (opcode, &error_info);
5959 return FALSE;
5960 }
5961 }
5962
5963 #ifdef DEBUG_AARCH64
5964 static inline void
5965 dump_opcode_operands (const aarch64_opcode *opcode)
5966 {
5967 int i = 0;
5968 while (opcode->operands[i] != AARCH64_OPND_NIL)
5969 {
5970 aarch64_verbose ("\t\t opnd%d: %s", i,
5971 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5972 ? aarch64_get_operand_name (opcode->operands[i])
5973 : aarch64_get_operand_desc (opcode->operands[i]));
5974 ++i;
5975 }
5976 }
5977 #endif /* DEBUG_AARCH64 */
5978
5979 /* This is the guts of the machine-dependent assembler. STR points to a
5980 machine dependent instruction. This function is supposed to emit
5981 the frags/bytes it assembles to. */
5982
5983 void
5984 md_assemble (char *str)
5985 {
5986 char *p = str;
5987 templates *template;
5988 aarch64_opcode *opcode;
5989 aarch64_inst *inst_base;
5990 unsigned saved_cond;
5991
5992 /* Align the previous label if needed. */
5993 if (last_label_seen != NULL)
5994 {
5995 symbol_set_frag (last_label_seen, frag_now);
5996 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5997 S_SET_SEGMENT (last_label_seen, now_seg);
5998 }
5999
6000 inst.reloc.type = BFD_RELOC_UNUSED;
6001
6002 DEBUG_TRACE ("\n\n");
6003 DEBUG_TRACE ("==============================");
6004 DEBUG_TRACE ("Enter md_assemble with %s", str);
6005
6006 template = opcode_lookup (&p);
6007 if (!template)
6008 {
6009 /* It wasn't an instruction, but it might be a register alias
6010 created by a directive of the form "alias .req reg". */
6011 if (!create_register_alias (str, p))
6012 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6013 str);
6014 return;
6015 }
6016
6017 skip_whitespace (p);
6018 if (*p == ',')
6019 {
6020 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6021 get_mnemonic_name (str), str);
6022 return;
6023 }
6024
6025 init_operand_error_report ();
6026
6027 /* Sections are assumed to start aligned. In an executable section, there
6028 is no MAP_DATA symbol pending, so we only align the address during a
6029 MAP_DATA --> MAP_INSN transition.
6030 For other sections, this is not guaranteed. */
6031 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6032 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6033 frag_align_code (2, 0);
6034
6035 saved_cond = inst.cond;
6036 reset_aarch64_instruction (&inst);
6037 inst.cond = saved_cond;
6038
6039 /* Iterate through all opcode entries with the same mnemonic name. */
6040 do
6041 {
6042 opcode = template->opcode;
6043
6044 DEBUG_TRACE ("opcode %s found", opcode->name);
6045 #ifdef DEBUG_AARCH64
6046 if (debug_dump)
6047 dump_opcode_operands (opcode);
6048 #endif /* DEBUG_AARCH64 */
6049
6050 mapping_state (MAP_INSN);
6051
6052 inst_base = &inst.base;
6053 inst_base->opcode = opcode;
6054
6055 /* Truly conditionally executed instructions, e.g. b.cond. */
6056 if (opcode->flags & F_COND)
6057 {
6058 gas_assert (inst.cond != COND_ALWAYS);
6059 inst_base->cond = get_cond_from_value (inst.cond);
6060 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6061 }
6062 else if (inst.cond != COND_ALWAYS)
6063 {
6064 /* We should not get here: the assembly looks like a conditional
6065 instruction but the opcode found is unconditional. */
6066 gas_assert (0);
6067 continue;
6068 }
6069
6070 if (parse_operands (p, opcode)
6071 && programmer_friendly_fixup (&inst)
6072 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6073 {
6074 /* Check that this instruction is supported for this CPU. */
6075 if (!opcode->avariant
6076 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
6077 {
6078 as_bad (_("selected processor does not support `%s'"), str);
6079 return;
6080 }
6081
6082 warn_unpredictable_ldst (&inst, str);
6083
6084 if (inst.reloc.type == BFD_RELOC_UNUSED
6085 || !inst.reloc.need_libopcodes_p)
6086 output_inst (NULL);
6087 else
6088 {
6089 /* If there is relocation generated for the instruction,
6090 store the instruction information for the future fix-up. */
6091 struct aarch64_inst *copy;
6092 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6093 copy = XNEW (struct aarch64_inst);
6094 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6095 output_inst (copy);
6096 }
6097 return;
6098 }
6099
6100 template = template->next;
6101 if (template != NULL)
6102 {
6103 reset_aarch64_instruction (&inst);
6104 inst.cond = saved_cond;
6105 }
6106 }
6107 while (template != NULL);
6108
6109 /* Issue the error messages if any. */
6110 output_operand_error_report (str);
6111 }
6112
6113 /* Various frobbings of labels and their addresses. */
6114
6115 void
6116 aarch64_start_line_hook (void)
6117 {
6118 last_label_seen = NULL;
6119 }
6120
6121 void
6122 aarch64_frob_label (symbolS * sym)
6123 {
6124 last_label_seen = sym;
6125
6126 dwarf2_emit_label (sym);
6127 }
6128
6129 int
6130 aarch64_data_in_code (void)
6131 {
6132 if (!strncmp (input_line_pointer + 1, "data:", 5))
6133 {
6134 *input_line_pointer = '/';
6135 input_line_pointer += 5;
6136 *input_line_pointer = 0;
6137 return 1;
6138 }
6139
6140 return 0;
6141 }
6142
6143 char *
6144 aarch64_canonicalize_symbol_name (char *name)
6145 {
6146 int len;
6147
6148 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6149 *(name + len - 5) = 0;
6150
6151 return name;
6152 }
6153 \f
6154 /* Table of all register names defined by default. The user can
6155 define additional names with .req. Note that all register names
6156 should appear in both upper and lowercase variants. Some registers
6157 also have mixed-case names. */
6158
6159 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6160 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6161 #define REGSET31(p,t) \
6162 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6163 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6164 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6165 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
6166 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6167 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6168 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6169 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6170 #define REGSET(p,t) \
6171 REGSET31(p,t), REGNUM(p,31,t)
6172
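/* As an illustration (not generated code), REGNUM (x, 0, R_64) expands via
   REGDEF to { "x0", 0, REG_TYPE_R_64, TRUE }, so each REGSET31/REGSET use
   below contributes one reg_entry per register name.  */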
6173 /* These go into aarch64_reg_hsh hash-table. */
6174 static const reg_entry reg_names[] = {
6175 /* Integer registers. */
6176 REGSET31 (x, R_64), REGSET31 (X, R_64),
6177 REGSET31 (w, R_32), REGSET31 (W, R_32),
6178
6179 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6180 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6181
6182 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6183 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6184
6185 /* Coprocessor register numbers. */
6186 REGSET (c, CN), REGSET (C, CN),
6187
6188 /* Floating-point single precision registers. */
6189 REGSET (s, FP_S), REGSET (S, FP_S),
6190
6191 /* Floating-point double precision registers. */
6192 REGSET (d, FP_D), REGSET (D, FP_D),
6193
6194 /* Floating-point half precision registers. */
6195 REGSET (h, FP_H), REGSET (H, FP_H),
6196
6197 /* Floating-point byte precision registers. */
6198 REGSET (b, FP_B), REGSET (B, FP_B),
6199
6200 /* Floating-point quad precision registers. */
6201 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6202
6203 /* FP/SIMD registers. */
6204 REGSET (v, VN), REGSET (V, VN),
6205 };
6206
6207 #undef REGDEF
6208 #undef REGNUM
6209 #undef REGSET
6210
6211 #define N 1
6212 #define n 0
6213 #define Z 1
6214 #define z 0
6215 #define C 1
6216 #define c 0
6217 #define V 1
6218 #define v 0
6219 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
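/* For example, the "NzCv" entry below is B (N, z, C, v) == 0b1010: bit 3
   holds N, bit 2 holds Z, bit 1 holds C and bit 0 holds V.  */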
6220 static const asm_nzcv nzcv_names[] = {
6221 {"nzcv", B (n, z, c, v)},
6222 {"nzcV", B (n, z, c, V)},
6223 {"nzCv", B (n, z, C, v)},
6224 {"nzCV", B (n, z, C, V)},
6225 {"nZcv", B (n, Z, c, v)},
6226 {"nZcV", B (n, Z, c, V)},
6227 {"nZCv", B (n, Z, C, v)},
6228 {"nZCV", B (n, Z, C, V)},
6229 {"Nzcv", B (N, z, c, v)},
6230 {"NzcV", B (N, z, c, V)},
6231 {"NzCv", B (N, z, C, v)},
6232 {"NzCV", B (N, z, C, V)},
6233 {"NZcv", B (N, Z, c, v)},
6234 {"NZcV", B (N, Z, c, V)},
6235 {"NZCv", B (N, Z, C, v)},
6236 {"NZCV", B (N, Z, C, V)}
6237 };
6238
6239 #undef N
6240 #undef n
6241 #undef Z
6242 #undef z
6243 #undef C
6244 #undef c
6245 #undef V
6246 #undef v
6247 #undef B
6248 \f
6249 /* MD interface: bits in the object file. */
6250
6251 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6252 for use in the a.out file, and store them in the array pointed to by buf.
6253 This knows about the endian-ness of the target machine and does
6254 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
6255 2 (short) and 4 (long). Floating-point numbers are put out as a series of
6256 LITTLENUMS (shorts, here at least). */
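/* For example, on a little-endian target md_number_to_chars (buf, 0xd503201f, 4)
   emits the bytes 1f 20 03 d5, the same order as the aarch64_noop pattern
   used by aarch64_handle_align below.  */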
6257
6258 void
6259 md_number_to_chars (char *buf, valueT val, int n)
6260 {
6261 if (target_big_endian)
6262 number_to_chars_bigendian (buf, val, n);
6263 else
6264 number_to_chars_littleendian (buf, val, n);
6265 }
6266
6267 /* MD interface: Sections. */
6268
6269 /* Estimate the size of a frag before relaxing. Assume everything fits in
6270 4 bytes. */
6271
6272 int
6273 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6274 {
6275 fragp->fr_var = 4;
6276 return 4;
6277 }
6278
6279 /* Round up a section size to the appropriate boundary. */
6280
6281 valueT
6282 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6283 {
6284 return size;
6285 }
6286
6287 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6288 of an rs_align_code fragment.
6289
6290 Here we fill the frag with the appropriate info for padding the
6291 output stream. The resulting frag will consist of a fixed (fr_fix)
6292 and of a repeating (fr_var) part.
6293
6294 The fixed content is always emitted before the repeating content and
6295 these two parts are used as follows in constructing the output:
6296 - the fixed part will be used to align to a valid instruction word
6297 boundary, in case that we start at a misaligned address; as no
6298 executable instruction can live at the misaligned location, we
6299 simply fill with zeros;
6300 - the variable part will be used to cover the remaining padding and
6301 we fill using the AArch64 NOP instruction.
6302
6303 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6304 enough storage space for up to 3 bytes of padding back to a valid
6305 instruction alignment and exactly 4 bytes to store the NOP pattern. */
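/* As a worked example, an .align 3 request starting 2 bytes past a word
   boundary would use a fixed part of two zero bytes (to reach 4-byte
   alignment) followed by the 4-byte NOP repeated as the variable part.  */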
6306
6307 void
6308 aarch64_handle_align (fragS * fragP)
6309 {
6310 /* NOP = d503201f */
6311 /* AArch64 instructions are always little-endian. */
6312 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6313
6314 int bytes, fix, noop_size;
6315 char *p;
6316
6317 if (fragP->fr_type != rs_align_code)
6318 return;
6319
6320 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6321 p = fragP->fr_literal + fragP->fr_fix;
6322
6323 #ifdef OBJ_ELF
6324 gas_assert (fragP->tc_frag_data.recorded);
6325 #endif
6326
6327 noop_size = sizeof (aarch64_noop);
6328
6329 fix = bytes & (noop_size - 1);
6330 if (fix)
6331 {
6332 #ifdef OBJ_ELF
6333 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6334 #endif
6335 memset (p, 0, fix);
6336 p += fix;
6337 fragP->fr_fix += fix;
6338 }
6339
6340 if (noop_size)
6341 memcpy (p, aarch64_noop, noop_size);
6342 fragP->fr_var = noop_size;
6343 }
6344
6345 /* Perform target specific initialisation of a frag.
6346 Note - despite the name this initialisation is not done when the frag
6347 is created, but only when its type is assigned. A frag can be created
6348 and used a long time before its type is set, so beware of assuming that
6349 this initialisation is performed first. */
6350
6351 #ifndef OBJ_ELF
6352 void
6353 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6354 int max_chars ATTRIBUTE_UNUSED)
6355 {
6356 }
6357
6358 #else /* OBJ_ELF is defined. */
6359 void
6360 aarch64_init_frag (fragS * fragP, int max_chars)
6361 {
6362 /* Record a mapping symbol for alignment frags. We will delete this
6363 later if the alignment ends up empty. */
6364 if (!fragP->tc_frag_data.recorded)
6365 fragP->tc_frag_data.recorded = 1;
6366
6367 switch (fragP->fr_type)
6368 {
6369 case rs_align_test:
6370 case rs_fill:
6371 mapping_state_2 (MAP_DATA, max_chars);
6372 break;
6373 case rs_align:
6374 /* PR 20364: We can get alignment frags in code sections,
6375 so do not just assume that we should use the MAP_DATA state. */
6376 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
6377 break;
6378 case rs_align_code:
6379 mapping_state_2 (MAP_INSN, max_chars);
6380 break;
6381 default:
6382 break;
6383 }
6384 }
6385 \f
6386 /* Initialize the DWARF-2 unwind information for this procedure. */
6387
6388 void
6389 tc_aarch64_frame_initial_instructions (void)
6390 {
6391 cfi_add_CFA_def_cfa (REG_SP, 0);
6392 }
6393 #endif /* OBJ_ELF */
6394
6395 /* Convert REGNAME to a DWARF-2 register number. */
6396
6397 int
6398 tc_aarch64_regname_to_dw2regnum (char *regname)
6399 {
6400 const reg_entry *reg = parse_reg (&regname);
6401 if (reg == NULL)
6402 return -1;
6403
6404 switch (reg->type)
6405 {
6406 case REG_TYPE_SP_32:
6407 case REG_TYPE_SP_64:
6408 case REG_TYPE_R_32:
6409 case REG_TYPE_R_64:
6410 return reg->number;
6411
6412 case REG_TYPE_FP_B:
6413 case REG_TYPE_FP_H:
6414 case REG_TYPE_FP_S:
6415 case REG_TYPE_FP_D:
6416 case REG_TYPE_FP_Q:
6417 return reg->number + 64;
6418
6419 default:
6420 break;
6421 }
6422 return -1;
6423 }
6424
6425 /* Implement DWARF2_ADDR_SIZE. */
6426
6427 int
6428 aarch64_dwarf2_addr_size (void)
6429 {
6430 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6431 if (ilp32_p)
6432 return 4;
6433 #endif
6434 return bfd_arch_bits_per_address (stdoutput) / 8;
6435 }
6436
6437 /* MD interface: Symbol and relocation handling. */
6438
6439 /* Return the address within the segment that a PC-relative fixup is
6440 relative to. For AArch64, PC-relative fixups applied to instructions
6441 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6442
6443 long
6444 md_pcrel_from_section (fixS * fixP, segT seg)
6445 {
6446 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6447
6448 /* If this is pc-relative and we are going to emit a relocation
6449 then we just want to put out any pipeline compensation that the linker
6450 will need. Otherwise we want to use the calculated base. */
6451 if (fixP->fx_pcrel
6452 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6453 || aarch64_force_relocation (fixP)))
6454 base = 0;
6455
6456 /* AArch64 should be consistent for all pc-relative relocations. */
6457 return base + AARCH64_PCREL_OFFSET;
6458 }
6459
6460 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6461 Otherwise we have no need to default values of symbols. */
6462
6463 symbolS *
6464 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6465 {
6466 #ifdef OBJ_ELF
6467 if (name[0] == '_' && name[1] == 'G'
6468 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6469 {
6470 if (!GOT_symbol)
6471 {
6472 if (symbol_find (name))
6473 as_bad (_("GOT already in the symbol table"));
6474
6475 GOT_symbol = symbol_new (name, undefined_section,
6476 (valueT) 0, &zero_address_frag);
6477 }
6478
6479 return GOT_symbol;
6480 }
6481 #endif
6482
6483 return 0;
6484 }
6485
6486 /* Return non-zero if the indicated VALUE has overflowed the maximum
6487 range expressible by an unsigned number with the indicated number of
6488 BITS. */
6489
6490 static bfd_boolean
6491 unsigned_overflow (valueT value, unsigned bits)
6492 {
6493 valueT lim;
6494 if (bits >= sizeof (valueT) * 8)
6495 return FALSE;
6496 lim = (valueT) 1 << bits;
6497 return (value >= lim);
6498 }
6499
6500
6501 /* Return non-zero if the indicated VALUE has overflowed the maximum
6502 range expressible by a signed number with the indicated number of
6503 BITS. */
6504
6505 static bfd_boolean
6506 signed_overflow (offsetT value, unsigned bits)
6507 {
6508 offsetT lim;
6509 if (bits >= sizeof (offsetT) * 8)
6510 return FALSE;
6511 lim = (offsetT) 1 << (bits - 1);
6512 return (value < -lim || value >= lim);
6513 }
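/* For example, unsigned_overflow (0x10000, 16) is TRUE, while
   signed_overflow (-0x8000, 16) is FALSE and signed_overflow (0x8000, 16)
   is TRUE.  */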
6514
6515 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6516 unsigned immediate offset load/store instruction, try to encode it as
6517 an unscaled, 9-bit, signed immediate offset load/store instruction.
6518 Return TRUE if it is successful; otherwise return FALSE.
6519
6520 As a programmer-friendly assembler, we generate LDUR/STUR instructions
6521 in response to the standard LDR/STR mnemonics when the immediate offset is
6522 unambiguous, i.e. when it is negative or unaligned. */
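/* For example, "ldr x0, [x1, #-8]" cannot use the scaled unsigned-offset
   form, so it may instead be emitted as "ldur x0, [x1, #-8]"; the same
   applies to an unaligned offset such as #1 for a word or doubleword
   access.  */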
6523
6524 static bfd_boolean
6525 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6526 {
6527 int idx;
6528 enum aarch64_op new_op;
6529 const aarch64_opcode *new_opcode;
6530
6531 gas_assert (instr->opcode->iclass == ldst_pos);
6532
6533 switch (instr->opcode->op)
6534 {
6535 case OP_LDRB_POS:new_op = OP_LDURB; break;
6536 case OP_STRB_POS: new_op = OP_STURB; break;
6537 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6538 case OP_LDRH_POS: new_op = OP_LDURH; break;
6539 case OP_STRH_POS: new_op = OP_STURH; break;
6540 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6541 case OP_LDR_POS: new_op = OP_LDUR; break;
6542 case OP_STR_POS: new_op = OP_STUR; break;
6543 case OP_LDRF_POS: new_op = OP_LDURV; break;
6544 case OP_STRF_POS: new_op = OP_STURV; break;
6545 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6546 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6547 default: new_op = OP_NIL; break;
6548 }
6549
6550 if (new_op == OP_NIL)
6551 return FALSE;
6552
6553 new_opcode = aarch64_get_opcode (new_op);
6554 gas_assert (new_opcode != NULL);
6555
6556 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6557 instr->opcode->op, new_opcode->op);
6558
6559 aarch64_replace_opcode (instr, new_opcode);
6560
6561 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6562 qualifier matching may fail because the out-of-date qualifier will
6563 prevent the operand being updated with a new and correct qualifier. */
6564 idx = aarch64_operand_index (instr->opcode->operands,
6565 AARCH64_OPND_ADDR_SIMM9);
6566 gas_assert (idx == 1);
6567 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6568
6569 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6570
6571 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6572 return FALSE;
6573
6574 return TRUE;
6575 }
6576
6577 /* Called by fix_insn to fix a MOV immediate alias instruction.
6578
6579 Operand for a generic move immediate instruction, which is an alias
6580 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6581 a 32-bit/64-bit immediate value into a general register. An assembler error
6582 shall result if the immediate cannot be created by a single one of these
6583 instructions. If there is a choice, then to ensure reversibility an
6584 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
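/* Illustrative examples (not an exhaustive list): "mov x0, #0x10000" can
   become MOVZ x0, #0x1, LSL #16; "mov x0, #-2" can become MOVN x0, #0x1;
   and a bitmask value such as "mov x0, #0xffff0000ffff0000" can become
   ORR x0, xzr, #0xffff0000ffff0000.  */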
6585
6586 static void
6587 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6588 {
6589 const aarch64_opcode *opcode;
6590
6591 /* Need to check if the destination is SP/ZR. The check has to be done
6592 before any aarch64_replace_opcode. */
6593 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6594 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6595
6596 instr->operands[1].imm.value = value;
6597 instr->operands[1].skip = 0;
6598
6599 if (try_mov_wide_p)
6600 {
6601 /* Try the MOVZ alias. */
6602 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6603 aarch64_replace_opcode (instr, opcode);
6604 if (aarch64_opcode_encode (instr->opcode, instr,
6605 &instr->value, NULL, NULL))
6606 {
6607 put_aarch64_insn (buf, instr->value);
6608 return;
6609 }
6610 /* Try the MOVN alias. */
6611 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6612 aarch64_replace_opcode (instr, opcode);
6613 if (aarch64_opcode_encode (instr->opcode, instr,
6614 &instr->value, NULL, NULL))
6615 {
6616 put_aarch64_insn (buf, instr->value);
6617 return;
6618 }
6619 }
6620
6621 if (try_mov_bitmask_p)
6622 {
6623 /* Try the ORR alias. */
6624 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6625 aarch64_replace_opcode (instr, opcode);
6626 if (aarch64_opcode_encode (instr->opcode, instr,
6627 &instr->value, NULL, NULL))
6628 {
6629 put_aarch64_insn (buf, instr->value);
6630 return;
6631 }
6632 }
6633
6634 as_bad_where (fixP->fx_file, fixP->fx_line,
6635 _("immediate cannot be moved by a single instruction"));
6636 }
6637
6638 /* An immediate-related instruction operand may have a symbol used
6639 in the assembly, e.g.
6640
6641 mov w0, u32
6642 .set u32, 0x00ffff00
6643
6644 At the time the assembly instruction is parsed, a referenced symbol,
6645 like 'u32' in the above example, may not have been seen; a fixS is created
6646 in such a case and is handled here after symbols have been resolved.
6647 The instruction is fixed up with VALUE using the information in *FIXP plus
6648 extra information in FLAGS.
6649
6650 This function is called by md_apply_fix to fix up instructions that need
6651 a fix-up as described above but no linker-time relocation. */
6652
6653 static void
6654 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6655 {
6656 int idx;
6657 uint32_t insn;
6658 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6659 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6660 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6661
6662 if (new_inst)
6663 {
6664 /* Now the instruction is about to be fixed-up, so the operand that
6665 was previously marked as 'ignored' needs to be unmarked in order
6666 to get the encoding done properly. */
6667 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6668 new_inst->operands[idx].skip = 0;
6669 }
6670
6671 gas_assert (opnd != AARCH64_OPND_NIL);
6672
6673 switch (opnd)
6674 {
6675 case AARCH64_OPND_EXCEPTION:
6676 if (unsigned_overflow (value, 16))
6677 as_bad_where (fixP->fx_file, fixP->fx_line,
6678 _("immediate out of range"));
6679 insn = get_aarch64_insn (buf);
6680 insn |= encode_svc_imm (value);
6681 put_aarch64_insn (buf, insn);
6682 break;
6683
6684 case AARCH64_OPND_AIMM:
6685 /* ADD or SUB with immediate.
6686 NOTE this assumes we come here with a add/sub shifted reg encoding
6687 3 322|2222|2 2 2 21111 111111
6688 1 098|7654|3 2 1 09876 543210 98765 43210
6689 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6690 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6691 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6692 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6693 ->
6694 3 322|2222|2 2 221111111111
6695 1 098|7654|3 2 109876543210 98765 43210
6696 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6697 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6698 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6699 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6700 Fields sf Rn Rd are already set. */
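/* For example, a resolved value of 0x3000 does not fit in 12 bits, but as
   its low 12 bits are zero it can be encoded below as #0x3, LSL #12; a
   negative value such as -16 is first converted to the opposite ADD/SUB
   form with value 16.  */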
6701 insn = get_aarch64_insn (buf);
6702 if (value < 0)
6703 {
6704 /* Add <-> sub. */
6705 insn = reencode_addsub_switch_add_sub (insn);
6706 value = -value;
6707 }
6708
6709 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6710 && unsigned_overflow (value, 12))
6711 {
6712 /* Try to shift the value by 12 to make it fit. */
6713 if (((value >> 12) << 12) == value
6714 && ! unsigned_overflow (value, 12 + 12))
6715 {
6716 value >>= 12;
6717 insn |= encode_addsub_imm_shift_amount (1);
6718 }
6719 }
6720
6721 if (unsigned_overflow (value, 12))
6722 as_bad_where (fixP->fx_file, fixP->fx_line,
6723 _("immediate out of range"));
6724
6725 insn |= encode_addsub_imm (value);
6726
6727 put_aarch64_insn (buf, insn);
6728 break;
6729
6730 case AARCH64_OPND_SIMD_IMM:
6731 case AARCH64_OPND_SIMD_IMM_SFT:
6732 case AARCH64_OPND_LIMM:
6733 /* Bit mask immediate. */
6734 gas_assert (new_inst != NULL);
6735 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6736 new_inst->operands[idx].imm.value = value;
6737 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6738 &new_inst->value, NULL, NULL))
6739 put_aarch64_insn (buf, new_inst->value);
6740 else
6741 as_bad_where (fixP->fx_file, fixP->fx_line,
6742 _("invalid immediate"));
6743 break;
6744
6745 case AARCH64_OPND_HALF:
6746 /* 16-bit unsigned immediate. */
6747 if (unsigned_overflow (value, 16))
6748 as_bad_where (fixP->fx_file, fixP->fx_line,
6749 _("immediate out of range"));
6750 insn = get_aarch64_insn (buf);
6751 insn |= encode_movw_imm (value & 0xffff);
6752 put_aarch64_insn (buf, insn);
6753 break;
6754
6755 case AARCH64_OPND_IMM_MOV:
6756 /* Operand for a generic move immediate instruction, which is
6757 an alias instruction that generates a single MOVZ, MOVN or ORR
6758 instruction to load a 32-bit/64-bit immediate value into a general
6759 register. An assembler error shall result if the immediate cannot be
6760 created by a single one of these instructions. If there is a choice,
6761 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6762 and MOVZ or MOVN to ORR. */
6763 gas_assert (new_inst != NULL);
6764 fix_mov_imm_insn (fixP, buf, new_inst, value);
6765 break;
6766
6767 case AARCH64_OPND_ADDR_SIMM7:
6768 case AARCH64_OPND_ADDR_SIMM9:
6769 case AARCH64_OPND_ADDR_SIMM9_2:
6770 case AARCH64_OPND_ADDR_UIMM12:
6771 /* Immediate offset in an address. */
6772 insn = get_aarch64_insn (buf);
6773
6774 gas_assert (new_inst != NULL && new_inst->value == insn);
6775 gas_assert (new_inst->opcode->operands[1] == opnd
6776 || new_inst->opcode->operands[2] == opnd);
6777
6778 /* Get the index of the address operand. */
6779 if (new_inst->opcode->operands[1] == opnd)
6780 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6781 idx = 1;
6782 else
6783 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6784 idx = 2;
6785
6786 /* Update the resolved offset value. */
6787 new_inst->operands[idx].addr.offset.imm = value;
6788
6789 /* Encode/fix-up. */
6790 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6791 &new_inst->value, NULL, NULL))
6792 {
6793 put_aarch64_insn (buf, new_inst->value);
6794 break;
6795 }
6796 else if (new_inst->opcode->iclass == ldst_pos
6797 && try_to_encode_as_unscaled_ldst (new_inst))
6798 {
6799 put_aarch64_insn (buf, new_inst->value);
6800 break;
6801 }
6802
6803 as_bad_where (fixP->fx_file, fixP->fx_line,
6804 _("immediate offset out of range"));
6805 break;
6806
6807 default:
6808 gas_assert (0);
6809 as_fatal (_("unhandled operand code %d"), opnd);
6810 }
6811 }
6812
6813 /* Apply a fixup (fixP) to segment data, once it has been determined
6814 by our caller that we have all the info we need to fix it up.
6815
6816 Parameter valP is the pointer to the value of the bits. */
6817
6818 void
6819 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6820 {
6821 offsetT value = *valP;
6822 uint32_t insn;
6823 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6824 int scale;
6825 unsigned flags = fixP->fx_addnumber;
6826
6827 DEBUG_TRACE ("\n\n");
6828 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6829 DEBUG_TRACE ("Enter md_apply_fix");
6830
6831 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6832
6833 /* Note whether this will delete the relocation. */
6834
6835 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6836 fixP->fx_done = 1;
6837
6838 /* Process the relocations. */
6839 switch (fixP->fx_r_type)
6840 {
6841 case BFD_RELOC_NONE:
6842 /* This will need to go in the object file. */
6843 fixP->fx_done = 0;
6844 break;
6845
6846 case BFD_RELOC_8:
6847 case BFD_RELOC_8_PCREL:
6848 if (fixP->fx_done || !seg->use_rela_p)
6849 md_number_to_chars (buf, value, 1);
6850 break;
6851
6852 case BFD_RELOC_16:
6853 case BFD_RELOC_16_PCREL:
6854 if (fixP->fx_done || !seg->use_rela_p)
6855 md_number_to_chars (buf, value, 2);
6856 break;
6857
6858 case BFD_RELOC_32:
6859 case BFD_RELOC_32_PCREL:
6860 if (fixP->fx_done || !seg->use_rela_p)
6861 md_number_to_chars (buf, value, 4);
6862 break;
6863
6864 case BFD_RELOC_64:
6865 case BFD_RELOC_64_PCREL:
6866 if (fixP->fx_done || !seg->use_rela_p)
6867 md_number_to_chars (buf, value, 8);
6868 break;
6869
6870 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6871 /* We claim that these fixups have been processed here, even if
6872 in fact we generate an error because we do not have a reloc
6873 for them, so tc_gen_reloc() will reject them. */
6874 fixP->fx_done = 1;
6875 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6876 {
6877 as_bad_where (fixP->fx_file, fixP->fx_line,
6878 _("undefined symbol %s used as an immediate value"),
6879 S_GET_NAME (fixP->fx_addsy));
6880 goto apply_fix_return;
6881 }
6882 fix_insn (fixP, flags, value);
6883 break;
6884
6885 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6886 if (fixP->fx_done || !seg->use_rela_p)
6887 {
6888 if (value & 3)
6889 as_bad_where (fixP->fx_file, fixP->fx_line,
6890 _("pc-relative load offset not word aligned"));
6891 if (signed_overflow (value, 21))
6892 as_bad_where (fixP->fx_file, fixP->fx_line,
6893 _("pc-relative load offset out of range"));
6894 insn = get_aarch64_insn (buf);
6895 insn |= encode_ld_lit_ofs_19 (value >> 2);
6896 put_aarch64_insn (buf, insn);
6897 }
6898 break;
6899
6900 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6901 if (fixP->fx_done || !seg->use_rela_p)
6902 {
6903 if (signed_overflow (value, 21))
6904 as_bad_where (fixP->fx_file, fixP->fx_line,
6905 _("pc-relative address offset out of range"));
6906 insn = get_aarch64_insn (buf);
6907 insn |= encode_adr_imm (value);
6908 put_aarch64_insn (buf, insn);
6909 }
6910 break;
6911
6912 case BFD_RELOC_AARCH64_BRANCH19:
6913 if (fixP->fx_done || !seg->use_rela_p)
6914 {
6915 if (value & 3)
6916 as_bad_where (fixP->fx_file, fixP->fx_line,
6917 _("conditional branch target not word aligned"));
6918 if (signed_overflow (value, 21))
6919 as_bad_where (fixP->fx_file, fixP->fx_line,
6920 _("conditional branch out of range"));
6921 insn = get_aarch64_insn (buf);
6922 insn |= encode_cond_branch_ofs_19 (value >> 2);
6923 put_aarch64_insn (buf, insn);
6924 }
6925 break;
6926
6927 case BFD_RELOC_AARCH64_TSTBR14:
6928 if (fixP->fx_done || !seg->use_rela_p)
6929 {
6930 if (value & 3)
6931 as_bad_where (fixP->fx_file, fixP->fx_line,
6932 _("conditional branch target not word aligned"));
6933 if (signed_overflow (value, 16))
6934 as_bad_where (fixP->fx_file, fixP->fx_line,
6935 _("conditional branch out of range"));
6936 insn = get_aarch64_insn (buf);
6937 insn |= encode_tst_branch_ofs_14 (value >> 2);
6938 put_aarch64_insn (buf, insn);
6939 }
6940 break;
6941
6942 case BFD_RELOC_AARCH64_CALL26:
6943 case BFD_RELOC_AARCH64_JUMP26:
6944 if (fixP->fx_done || !seg->use_rela_p)
6945 {
6946 if (value & 3)
6947 as_bad_where (fixP->fx_file, fixP->fx_line,
6948 _("branch target not word aligned"));
6949 if (signed_overflow (value, 28))
6950 as_bad_where (fixP->fx_file, fixP->fx_line,
6951 _("branch out of range"));
6952 insn = get_aarch64_insn (buf);
6953 insn |= encode_branch_ofs_26 (value >> 2);
6954 put_aarch64_insn (buf, insn);
6955 }
6956 break;
6957
6958 case BFD_RELOC_AARCH64_MOVW_G0:
6959 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6960 case BFD_RELOC_AARCH64_MOVW_G0_S:
6961 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6962 scale = 0;
6963 goto movw_common;
6964 case BFD_RELOC_AARCH64_MOVW_G1:
6965 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6966 case BFD_RELOC_AARCH64_MOVW_G1_S:
6967 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6968 scale = 16;
6969 goto movw_common;
6970 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6971 scale = 0;
6972 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6973 /* Should always be exported to object file, see
6974 aarch64_force_relocation(). */
6975 gas_assert (!fixP->fx_done);
6976 gas_assert (seg->use_rela_p);
6977 goto movw_common;
6978 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6979 scale = 16;
6980 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6981 /* Should always be exported to object file, see
6982 aarch64_force_relocation(). */
6983 gas_assert (!fixP->fx_done);
6984 gas_assert (seg->use_rela_p);
6985 goto movw_common;
6986 case BFD_RELOC_AARCH64_MOVW_G2:
6987 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6988 case BFD_RELOC_AARCH64_MOVW_G2_S:
6989 scale = 32;
6990 goto movw_common;
6991 case BFD_RELOC_AARCH64_MOVW_G3:
6992 scale = 48;
6993 movw_common:
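/* For a resolved fixup the value is range-checked for its group and then
   shifted into the 16-bit field; e.g. BFD_RELOC_AARCH64_MOVW_G1 with a
   value of 0x12345678 writes (0x12345678 >> 16) & 0xffff = 0x1234.  */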
6994 if (fixP->fx_done || !seg->use_rela_p)
6995 {
6996 insn = get_aarch64_insn (buf);
6997
6998 if (!fixP->fx_done)
6999 {
7000 /* REL signed addend must fit in 16 bits */
7001 if (signed_overflow (value, 16))
7002 as_bad_where (fixP->fx_file, fixP->fx_line,
7003 _("offset out of range"));
7004 }
7005 else
7006 {
7007 /* Check for overflow and scale. */
7008 switch (fixP->fx_r_type)
7009 {
7010 case BFD_RELOC_AARCH64_MOVW_G0:
7011 case BFD_RELOC_AARCH64_MOVW_G1:
7012 case BFD_RELOC_AARCH64_MOVW_G2:
7013 case BFD_RELOC_AARCH64_MOVW_G3:
7014 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7015 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7016 if (unsigned_overflow (value, scale + 16))
7017 as_bad_where (fixP->fx_file, fixP->fx_line,
7018 _("unsigned value out of range"));
7019 break;
7020 case BFD_RELOC_AARCH64_MOVW_G0_S:
7021 case BFD_RELOC_AARCH64_MOVW_G1_S:
7022 case BFD_RELOC_AARCH64_MOVW_G2_S:
7023 /* NOTE: We can only come here with movz or movn. */
7024 if (signed_overflow (value, scale + 16))
7025 as_bad_where (fixP->fx_file, fixP->fx_line,
7026 _("signed value out of range"));
7027 if (value < 0)
7028 {
7029 /* Force use of MOVN. */
7030 value = ~value;
7031 insn = reencode_movzn_to_movn (insn);
7032 }
7033 else
7034 {
7035 /* Force use of MOVZ. */
7036 insn = reencode_movzn_to_movz (insn);
7037 }
7038 break;
7039 default:
7040 /* Unchecked relocations. */
7041 break;
7042 }
7043 value >>= scale;
7044 }
7045
7046 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7047 insn |= encode_movw_imm (value & 0xffff);
7048
7049 put_aarch64_insn (buf, insn);
7050 }
7051 break;
7052
7053 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7054 fixP->fx_r_type = (ilp32_p
7055 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7056 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7057 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7058 /* Should always be exported to object file, see
7059 aarch64_force_relocation(). */
7060 gas_assert (!fixP->fx_done);
7061 gas_assert (seg->use_rela_p);
7062 break;
7063
7064 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7065 fixP->fx_r_type = (ilp32_p
7066 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7067 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7068 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7069 /* Should always be exported to object file, see
7070 aarch64_force_relocation(). */
7071 gas_assert (!fixP->fx_done);
7072 gas_assert (seg->use_rela_p);
7073 break;
7074
7075 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7076 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7077 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7078 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7079 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7080 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7081 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7082 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7083 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7084 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7085 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7086 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7087 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7088 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7089 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7090 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7091 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7092 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7093 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7094 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7095 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7096 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7097 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7098 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7099 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7100 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7101 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7102 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7103 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7104 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7105 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7106 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7107 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7108 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7109 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7110 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7111 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7112 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7113 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7114 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7115 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7116 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7117 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7118 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7119 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7120 /* Should always be exported to object file, see
7121 aarch64_force_relocation(). */
7122 gas_assert (!fixP->fx_done);
7123 gas_assert (seg->use_rela_p);
7124 break;
7125
7126 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7127 /* Should always be exported to object file, see
7128 aarch64_force_relocation(). */
7129 fixP->fx_r_type = (ilp32_p
7130 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7131 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7132 gas_assert (!fixP->fx_done);
7133 gas_assert (seg->use_rela_p);
7134 break;
7135
7136 case BFD_RELOC_AARCH64_ADD_LO12:
7137 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7138 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7139 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7140 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7141 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7142 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7143 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7144 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7145 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7146 case BFD_RELOC_AARCH64_LDST128_LO12:
7147 case BFD_RELOC_AARCH64_LDST16_LO12:
7148 case BFD_RELOC_AARCH64_LDST32_LO12:
7149 case BFD_RELOC_AARCH64_LDST64_LO12:
7150 case BFD_RELOC_AARCH64_LDST8_LO12:
7151 /* Should always be exported to object file, see
7152 aarch64_force_relocation(). */
7153 gas_assert (!fixP->fx_done);
7154 gas_assert (seg->use_rela_p);
7155 break;
7156
7157 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7158 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7159 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7160 break;
7161
7162 case BFD_RELOC_UNUSED:
7163 /* An error will already have been reported. */
7164 break;
7165
7166 default:
7167 as_bad_where (fixP->fx_file, fixP->fx_line,
7168 _("unexpected %s fixup"),
7169 bfd_get_reloc_code_name (fixP->fx_r_type));
7170 break;
7171 }
7172
7173 apply_fix_return:
7174 /* Free the allocated struct aarch64_inst.
7175 N.B. currently only a very limited number of fix-up types actually use
7176 this field, so the impact on performance should be minimal. */
7177 if (fixP->tc_fix_data.inst != NULL)
7178 free (fixP->tc_fix_data.inst);
7179
7180 return;
7181 }
7182
7183 /* Translate internal representation of relocation info to BFD target
7184 format. */
7185
7186 arelent *
7187 tc_gen_reloc (asection * section, fixS * fixp)
7188 {
7189 arelent *reloc;
7190 bfd_reloc_code_real_type code;
7191
7192 reloc = XNEW (arelent);
7193
7194 reloc->sym_ptr_ptr = XNEW (asymbol *);
7195 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7196 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7197
7198 if (fixp->fx_pcrel)
7199 {
7200 if (section->use_rela_p)
7201 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7202 else
7203 fixp->fx_offset = reloc->address;
7204 }
7205 reloc->addend = fixp->fx_offset;
7206
7207 code = fixp->fx_r_type;
7208 switch (code)
7209 {
7210 case BFD_RELOC_16:
7211 if (fixp->fx_pcrel)
7212 code = BFD_RELOC_16_PCREL;
7213 break;
7214
7215 case BFD_RELOC_32:
7216 if (fixp->fx_pcrel)
7217 code = BFD_RELOC_32_PCREL;
7218 break;
7219
7220 case BFD_RELOC_64:
7221 if (fixp->fx_pcrel)
7222 code = BFD_RELOC_64_PCREL;
7223 break;
7224
7225 default:
7226 break;
7227 }
7228
7229 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7230 if (reloc->howto == NULL)
7231 {
7232 as_bad_where (fixp->fx_file, fixp->fx_line,
7233 _
7234 ("cannot represent %s relocation in this object file format"),
7235 bfd_get_reloc_code_name (code));
7236 return NULL;
7237 }
7238
7239 return reloc;
7240 }
7241
7242 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7243
7244 void
7245 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7246 {
7247 bfd_reloc_code_real_type type;
7248 int pcrel = 0;
7249
7250 /* Pick a reloc.
7251 FIXME: @@ Should look at CPU word size. */
7252 switch (size)
7253 {
7254 case 1:
7255 type = BFD_RELOC_8;
7256 break;
7257 case 2:
7258 type = BFD_RELOC_16;
7259 break;
7260 case 4:
7261 type = BFD_RELOC_32;
7262 break;
7263 case 8:
7264 type = BFD_RELOC_64;
7265 break;
7266 default:
7267 as_bad (_("cannot do %u-byte relocation"), size);
7268 type = BFD_RELOC_UNUSED;
7269 break;
7270 }
7271
7272 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7273 }
7274
7275 int
7276 aarch64_force_relocation (struct fix *fixp)
7277 {
7278 switch (fixp->fx_r_type)
7279 {
7280 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7281 /* Perform these "immediate" internal relocations
7282 even if the symbol is extern or weak. */
7283 return 0;
7284
7285 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7286 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7287 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7288 /* Pseudo relocs that need to be fixed up according to
7289 ilp32_p. */
7290 return 0;
7291
7292 case BFD_RELOC_AARCH64_ADD_LO12:
7293 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7294 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7295 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7296 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7297 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7298 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7299 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7300 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7301 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7302 case BFD_RELOC_AARCH64_LDST128_LO12:
7303 case BFD_RELOC_AARCH64_LDST16_LO12:
7304 case BFD_RELOC_AARCH64_LDST32_LO12:
7305 case BFD_RELOC_AARCH64_LDST64_LO12:
7306 case BFD_RELOC_AARCH64_LDST8_LO12:
7307 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7308 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7309 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7310 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7311 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7312 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7313 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7314 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7315 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7316 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7317 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7318 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7319 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7320 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7321 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7322 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7323 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7324 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7325 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7326 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7327 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7328 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7329 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7330 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7331 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7332 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7333 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7334 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7335 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7336 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7337 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7338 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7339 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7340 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7341 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7342 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7343 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7344 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7345 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7346 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7347 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7348 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7349 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7350 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7351 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7352 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7353 /* Always leave these relocations for the linker. */
7354 return 1;
7355
7356 default:
7357 break;
7358 }
7359
7360 return generic_force_reloc (fixp);
7361 }
7362
7363 #ifdef OBJ_ELF
7364
7365 const char *
7366 elf64_aarch64_target_format (void)
7367 {
7368 if (strcmp (TARGET_OS, "cloudabi") == 0)
7369 {
7370 /* FIXME: What to do for ilp32_p ? */
7371 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7372 }
7373 if (target_big_endian)
7374 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7375 else
7376 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7377 }
7378
7379 void
7380 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7381 {
7382 elf_frob_symbol (symp, puntp);
7383 }
7384 #endif
7385
7386 /* MD interface: Finalization. */
7387
7388 /* A good place to do this, although this was probably not intended
7389 for this kind of use. We need to dump the literal pool before
7390 references are made to a null symbol pointer. */
7391
7392 void
7393 aarch64_cleanup (void)
7394 {
7395 literal_pool *pool;
7396
7397 for (pool = list_of_pools; pool; pool = pool->next)
7398 {
7399 /* Put it at the end of the relevant section. */
7400 subseg_set (pool->section, pool->sub_section);
7401 s_ltorg (0);
7402 }
7403 }
7404
7405 #ifdef OBJ_ELF
7406 /* Remove any excess mapping symbols generated for alignment frags in
7407 SEC. We may have created a mapping symbol before a zero byte
7408 alignment; remove it if there's a mapping symbol after the
7409 alignment. */
7410 static void
7411 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7412 void *dummy ATTRIBUTE_UNUSED)
7413 {
7414 segment_info_type *seginfo = seg_info (sec);
7415 fragS *fragp;
7416
7417 if (seginfo == NULL || seginfo->frchainP == NULL)
7418 return;
7419
7420 for (fragp = seginfo->frchainP->frch_root;
7421 fragp != NULL; fragp = fragp->fr_next)
7422 {
7423 symbolS *sym = fragp->tc_frag_data.last_map;
7424 fragS *next = fragp->fr_next;
7425
7426 /* Variable-sized frags have been converted to fixed size by
7427 this point. But if this was variable-sized to start with,
7428 there will be a fixed-size frag after it. So don't handle
7429 next == NULL. */
7430 if (sym == NULL || next == NULL)
7431 continue;
7432
7433 if (S_GET_VALUE (sym) < next->fr_address)
7434 /* Not at the end of this frag. */
7435 continue;
7436 know (S_GET_VALUE (sym) == next->fr_address);
7437
7438 do
7439 {
7440 if (next->tc_frag_data.first_map != NULL)
7441 {
7442 /* Next frag starts with a mapping symbol. Discard this
7443 one. */
7444 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7445 break;
7446 }
7447
7448 if (next->fr_next == NULL)
7449 {
7450 /* This mapping symbol is at the end of the section. Discard
7451 it. */
7452 know (next->fr_fix == 0 && next->fr_var == 0);
7453 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7454 break;
7455 }
7456
7457 /* As long as we have empty frags without any mapping symbols,
7458 keep looking. */
7459 /* If the next frag is non-empty and does not start with a
7460 mapping symbol, then this mapping symbol is required. */
7461 if (next->fr_address != next->fr_next->fr_address)
7462 break;
7463
7464 next = next->fr_next;
7465 }
7466 while (next != NULL);
7467 }
7468 }
7469 #endif
7470
7471 /* Adjust the symbol table. */
7472
7473 void
7474 aarch64_adjust_symtab (void)
7475 {
7476 #ifdef OBJ_ELF
7477 /* Remove any overlapping mapping symbols generated by alignment frags. */
7478 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7479 /* Now do generic ELF adjustments. */
7480 elf_adjust_symtab ();
7481 #endif
7482 }
7483
7484 static void
7485 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7486 {
7487 const char *hash_err;
7488
7489 hash_err = hash_insert (table, key, value);
7490 if (hash_err)
7491 printf ("Internal Error: Can't hash %s\n", key);
7492 }
7493
7494 static void
7495 fill_instruction_hash_table (void)
7496 {
7497 aarch64_opcode *opcode = aarch64_opcode_table;
7498
7499 while (opcode->name != NULL)
7500 {
7501 templates *templ, *new_templ;
7502 templ = hash_find (aarch64_ops_hsh, opcode->name);
7503
7504 new_templ = XNEW (templates);
7505 new_templ->opcode = opcode;
7506 new_templ->next = NULL;
7507
7508 if (!templ)
7509 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7510 else
7511 {
7512 new_templ->next = templ->next;
7513 templ->next = new_templ;
7514 }
7515 ++opcode;
7516 }
7517 }
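/* Every opcode entry sharing a mnemonic therefore ends up on a single
   chain; e.g. all "ldr" variants hang off one "ldr" hash entry, and
   md_assemble walks that chain until an entry parses and encodes
   successfully.  */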
7518
7519 static inline void
7520 convert_to_upper (char *dst, const char *src, size_t num)
7521 {
7522 unsigned int i;
7523 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7524 *dst = TOUPPER (*src);
7525 *dst = '\0';
7526 }
7527
7528 /* Assume STR points to a lower-case string; allocate, convert and return
7529 the corresponding upper-case string. */
7530 static inline const char*
7531 get_upper_str (const char *str)
7532 {
7533 char *ret;
7534 size_t len = strlen (str);
7535 ret = XNEWVEC (char, len + 1);
7536 convert_to_upper (ret, str, len);
7537 return ret;
7538 }
7539
7540 /* MD interface: Initialization. */
7541
7542 void
7543 md_begin (void)
7544 {
7545 unsigned mach;
7546 unsigned int i;
7547
7548 if ((aarch64_ops_hsh = hash_new ()) == NULL
7549 || (aarch64_cond_hsh = hash_new ()) == NULL
7550 || (aarch64_shift_hsh = hash_new ()) == NULL
7551 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7552 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7553 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7554 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7555 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7556 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7557 || (aarch64_reg_hsh = hash_new ()) == NULL
7558 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7559 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7560 || (aarch64_pldop_hsh = hash_new ()) == NULL
7561 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
7562 as_fatal (_("virtual memory exhausted"));
7563
7564 fill_instruction_hash_table ();
7565
7566 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7567 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7568 (void *) (aarch64_sys_regs + i));
7569
7570 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7571 checked_hash_insert (aarch64_pstatefield_hsh,
7572 aarch64_pstatefields[i].name,
7573 (void *) (aarch64_pstatefields + i));
7574
7575 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
7576 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7577 aarch64_sys_regs_ic[i].name,
7578 (void *) (aarch64_sys_regs_ic + i));
7579
7580 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
7581 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7582 aarch64_sys_regs_dc[i].name,
7583 (void *) (aarch64_sys_regs_dc + i));
7584
7585 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
7586 checked_hash_insert (aarch64_sys_regs_at_hsh,
7587 aarch64_sys_regs_at[i].name,
7588 (void *) (aarch64_sys_regs_at + i));
7589
7590 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
7591 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7592 aarch64_sys_regs_tlbi[i].name,
7593 (void *) (aarch64_sys_regs_tlbi + i));
7594
7595 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7596 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7597 (void *) (reg_names + i));
7598
7599 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7600 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7601 (void *) (nzcv_names + i));
7602
7603 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7604 {
7605 const char *name = aarch64_operand_modifiers[i].name;
7606 checked_hash_insert (aarch64_shift_hsh, name,
7607 (void *) (aarch64_operand_modifiers + i));
7608 /* Also hash the name in the upper case. */
7609 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7610 (void *) (aarch64_operand_modifiers + i));
7611 }
7612
7613 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7614 {
7615 unsigned int j;
7616 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7617 the same condition code. */
7618 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7619 {
7620 const char *name = aarch64_conds[i].names[j];
7621 if (name == NULL)
7622 break;
7623 checked_hash_insert (aarch64_cond_hsh, name,
7624 (void *) (aarch64_conds + i));
7625 /* Also hash the name in the upper case. */
7626 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7627 (void *) (aarch64_conds + i));
7628 }
7629 }
7630
7631 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7632 {
7633 const char *name = aarch64_barrier_options[i].name;
7634 /* Skip xx00 - the unallocated values of option. */
7635 if ((i & 0x3) == 0)
7636 continue;
7637 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7638 (void *) (aarch64_barrier_options + i));
7639 /* Also hash the name in the upper case. */
7640 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7641 (void *) (aarch64_barrier_options + i));
7642 }
7643
7644 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7645 {
7646 const char* name = aarch64_prfops[i].name;
7647 /* Skip the unallocated hint encodings. */
7648 if (name == NULL)
7649 continue;
7650 checked_hash_insert (aarch64_pldop_hsh, name,
7651 (void *) (aarch64_prfops + i));
7652 /* Also hash the name in the upper case. */
7653 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7654 (void *) (aarch64_prfops + i));
7655 }
7656
7657 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
7658 {
7659 const char* name = aarch64_hint_options[i].name;
7660
7661 checked_hash_insert (aarch64_hint_opt_hsh, name,
7662 (void *) (aarch64_hint_options + i));
7663 /* Also hash the name in the upper case. */
7664 checked_hash_insert (aarch64_hint_opt_hsh, get_upper_str (name),
7665 (void *) (aarch64_hint_options + i));
7666 }
7667
7668 /* Set the cpu variant based on the command-line options. */
7669 if (!mcpu_cpu_opt)
7670 mcpu_cpu_opt = march_cpu_opt;
7671
7672 if (!mcpu_cpu_opt)
7673 mcpu_cpu_opt = &cpu_default;
7674
7675 cpu_variant = *mcpu_cpu_opt;
7676
7677 /* Record the CPU type. */
7678 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7679
7680 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7681 }
7682
7683 /* Command line processing. */
7684
7685 const char *md_shortopts = "m:";
7686
7687 #ifdef AARCH64_BI_ENDIAN
7688 #define OPTION_EB (OPTION_MD_BASE + 0)
7689 #define OPTION_EL (OPTION_MD_BASE + 1)
7690 #else
7691 #if TARGET_BYTES_BIG_ENDIAN
7692 #define OPTION_EB (OPTION_MD_BASE + 0)
7693 #else
7694 #define OPTION_EL (OPTION_MD_BASE + 1)
7695 #endif
7696 #endif
7697
7698 struct option md_longopts[] = {
7699 #ifdef OPTION_EB
7700 {"EB", no_argument, NULL, OPTION_EB},
7701 #endif
7702 #ifdef OPTION_EL
7703 {"EL", no_argument, NULL, OPTION_EL},
7704 #endif
7705 {NULL, no_argument, NULL, 0}
7706 };
7707
7708 size_t md_longopts_size = sizeof (md_longopts);
7709
7710 struct aarch64_option_table
7711 {
7712 const char *option; /* Option name to match. */
7713 const char *help; /* Help information. */
7714 int *var; /* Variable to change. */
7715 int value; /* What to change it to. */
7716 char *deprecated; /* If non-null, print this message. */
7717 };
7718
7719 static struct aarch64_option_table aarch64_opts[] = {
7720 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7721 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7722 NULL},
7723 #ifdef DEBUG_AARCH64
7724 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7725 #endif /* DEBUG_AARCH64 */
7726 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7727 NULL},
7728 {"mno-verbose-error", N_("do not output verbose error messages"),
7729 &verbose_error_p, 0, NULL},
7730 {NULL, NULL, NULL, 0, NULL}
7731 };
7732
7733 struct aarch64_cpu_option_table
7734 {
7735 const char *name;
7736 const aarch64_feature_set value;
7737 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7738 case. */
7739 const char *canonical_name;
7740 };
7741
7742 /* This list should, at a minimum, contain all the cpu names
7743 recognized by GCC. */
7744 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7745 {"all", AARCH64_ANY, NULL},
7746 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
7747 AARCH64_FEATURE_CRC), "Cortex-A35"},
7748 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7749 AARCH64_FEATURE_CRC), "Cortex-A53"},
7750 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7751 AARCH64_FEATURE_CRC), "Cortex-A57"},
7752 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7753 AARCH64_FEATURE_CRC), "Cortex-A72"},
7754 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
7755 AARCH64_FEATURE_CRC), "Cortex-A73"},
7756 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7757 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7758 "Samsung Exynos M1"},
7759 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7760 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7761 "Qualcomm QDF24XX"},
7762 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7763 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7764 "Cavium ThunderX"},
7765 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
7766 AARCH64_FEATURE_CRYPTO),
7767 "Broadcom Vulcan"},
7768 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
7769 in earlier releases and is superseded by 'xgene1' in all
7770 tools. */
7771 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7772 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7773 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7774 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7775 {"generic", AARCH64_ARCH_V8, NULL},
7776
7777 {NULL, AARCH64_ARCH_NONE, NULL}
7778 };
7779
7780 struct aarch64_arch_option_table
7781 {
7782 const char *name;
7783 const aarch64_feature_set value;
7784 };
7785
7786 /* This list should, at a minimum, contain all the architecture names
7787 recognized by GCC. */
7788 static const struct aarch64_arch_option_table aarch64_archs[] = {
7789 {"all", AARCH64_ANY},
7790 {"armv8-a", AARCH64_ARCH_V8},
7791 {"armv8.1-a", AARCH64_ARCH_V8_1},
7792 {"armv8.2-a", AARCH64_ARCH_V8_2},
7793 {NULL, AARCH64_ARCH_NONE}
7794 };
7795
7796 /* ISA extensions. */
7797 struct aarch64_option_cpu_value_table
7798 {
7799 const char *name;
7800 const aarch64_feature_set value;
7801 const aarch64_feature_set require; /* Feature dependencies. */
7802 };
7803
7804 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7805 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
7806 AARCH64_ARCH_NONE},
7807 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
7808 AARCH64_ARCH_NONE},
7809 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
7810 AARCH64_ARCH_NONE},
7811 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
7812 AARCH64_ARCH_NONE},
7813 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
7814 AARCH64_ARCH_NONE},
7815 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
7816 AARCH64_ARCH_NONE},
7817 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
7818 AARCH64_ARCH_NONE},
7819 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
7820 AARCH64_ARCH_NONE},
7821 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
7822 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7823 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
7824 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7825 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
7826 AARCH64_ARCH_NONE},
7827 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
7828 };
7829
7830 struct aarch64_long_option_table
7831 {
7832 const char *option; /* Substring to match. */
7833 const char *help; /* Help information. */
7834 int (*func) (const char *subopt); /* Function to decode sub-option. */
7835 char *deprecated; /* If non-null, print this message. */
7836 };
7837
7838 /* Transitive closure of features depending on set. */
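/* For example, if "fp" is being disabled then "fp16" must also be disabled,
   because the aarch64_features table above lists "fp" as a requirement of
   "fp16".  */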
7839 static aarch64_feature_set
7840 aarch64_feature_disable_set (aarch64_feature_set set)
7841 {
7842 const struct aarch64_option_cpu_value_table *opt;
7843 aarch64_feature_set prev = 0;
7844
7845 while (prev != set) {
7846 prev = set;
7847 for (opt = aarch64_features; opt->name != NULL; opt++)
7848 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
7849 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
7850 }
7851 return set;
7852 }
7853
7854 /* Transitive closure of dependencies of set. */
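/* For example, enabling "fp16" must also enable "fp", and enabling "rdma"
   must also enable "simd", following the requirements recorded in the
   aarch64_features table above.  */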
7855 static aarch64_feature_set
7856 aarch64_feature_enable_set (aarch64_feature_set set)
7857 {
7858 const struct aarch64_option_cpu_value_table *opt;
7859 aarch64_feature_set prev = 0;
7860
7861 while (prev != set) {
7862 prev = set;
7863 for (opt = aarch64_features; opt->name != NULL; opt++)
7864 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
7865 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
7866 }
7867 return set;
7868 }
7869
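/* Parse the architecture extension string STR, updating the feature set
   pointed to by *OPT_P.  When EXT_ONLY is false, STR is a sequence of
   "+extension" / "+noextension" items, e.g. "+crc+nofp16"; extensions to
   add must appear before extensions to remove.  When EXT_ONLY is true,
   STR is a single extension name without the leading '+', as used by the
   .arch_extension directive.  Return 1 on success, 0 on failure.  */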
7870 static int
7871 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
7872 bfd_boolean ext_only)
7873 {
7874 /* We insist on extensions being added before being removed. We achieve
7875 this by using the ADDING_VALUE variable to indicate whether we are
7876 adding an extension (1) or removing it (0) and only allowing it to
7877 change in the order -1 -> 1 -> 0. */
7878 int adding_value = -1;
7879 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
7880
7881 /* Copy the feature set, so that we can modify it. */
7882 *ext_set = **opt_p;
7883 *opt_p = ext_set;
7884
7885 while (str != NULL && *str != 0)
7886 {
7887 const struct aarch64_option_cpu_value_table *opt;
7888 const char *ext = NULL;
7889 int optlen;
7890
7891 if (!ext_only)
7892 {
7893 if (*str != '+')
7894 {
7895 as_bad (_("invalid architectural extension"));
7896 return 0;
7897 }
7898
7899 ext = strchr (++str, '+');
7900 }
7901
7902 if (ext != NULL)
7903 optlen = ext - str;
7904 else
7905 optlen = strlen (str);
7906
7907 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7908 {
7909 if (adding_value != 0)
7910 adding_value = 0;
7911 optlen -= 2;
7912 str += 2;
7913 }
7914 else if (optlen > 0)
7915 {
7916 if (adding_value == -1)
7917 adding_value = 1;
7918 else if (adding_value != 1)
7919 {
7920 as_bad (_("must specify extensions to add before specifying "
7921 "those to remove"));
7922 return 0;
7923 }
7924 }
7925
7926 if (optlen == 0)
7927 {
7928 as_bad (_("missing architectural extension"));
7929 return 0;
7930 }
7931
7932 gas_assert (adding_value != -1);
7933
7934 for (opt = aarch64_features; opt->name != NULL; opt++)
7935 if (strncmp (opt->name, str, optlen) == 0)
7936 {
7937 aarch64_feature_set set;
7938
7939 /* Add or remove the extension. */
7940 if (adding_value)
7941 {
7942 set = aarch64_feature_enable_set (opt->value);
7943 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
7944 }
7945 else
7946 {
7947 set = aarch64_feature_disable_set (opt->value);
7948 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
7949 }
7950 break;
7951 }
7952
7953 if (opt->name == NULL)
7954 {
7955 as_bad (_("unknown architectural extension `%s'"), str);
7956 return 0;
7957 }
7958
7959 str = ext;
7960 }
7961
7962 return 1;
7963 }
7964
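/* Parse the -mcpu= argument STR: a CPU name from the aarch64_cpus table,
   optionally followed by architecture extensions, e.g.
   "cortex-a53+crypto".  Return 1 on success, 0 on failure.  */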
7965 static int
7966 aarch64_parse_cpu (const char *str)
7967 {
7968 const struct aarch64_cpu_option_table *opt;
7969 const char *ext = strchr (str, '+');
7970 size_t optlen;
7971
7972 if (ext != NULL)
7973 optlen = ext - str;
7974 else
7975 optlen = strlen (str);
7976
7977 if (optlen == 0)
7978 {
7979 as_bad (_("missing cpu name `%s'"), str);
7980 return 0;
7981 }
7982
7983 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7984 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7985 {
7986 mcpu_cpu_opt = &opt->value;
7987 if (ext != NULL)
7988 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7989
7990 return 1;
7991 }
7992
7993 as_bad (_("unknown cpu `%s'"), str);
7994 return 0;
7995 }
7996
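/* Parse the -march= argument STR: an architecture name from the
   aarch64_archs table, optionally followed by extensions, e.g.
   "armv8-a+crc".  Return 1 on success, 0 on failure.  */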
7997 static int
7998 aarch64_parse_arch (const char *str)
7999 {
8000 const struct aarch64_arch_option_table *opt;
8001 const char *ext = strchr (str, '+');
8002 size_t optlen;
8003
8004 if (ext != NULL)
8005 optlen = ext - str;
8006 else
8007 optlen = strlen (str);
8008
8009 if (optlen == 0)
8010 {
8011 as_bad (_("missing architecture name `%s'"), str);
8012 return 0;
8013 }
8014
8015 for (opt = aarch64_archs; opt->name != NULL; opt++)
8016 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8017 {
8018 march_cpu_opt = &opt->value;
8019 if (ext != NULL)
8020 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8021
8022 return 1;
8023 }
8024
8025 as_bad (_("unknown architecture `%s'\n"), str);
8026 return 0;
8027 }
8028
8029 /* ABIs. */
8030 struct aarch64_option_abi_value_table
8031 {
8032 const char *name;
8033 enum aarch64_abi_type value;
8034 };
8035
8036 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
8037 {"ilp32", AARCH64_ABI_ILP32},
8038 {"lp64", AARCH64_ABI_LP64},
8039 };
8040
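/* Parse the -mabi= argument STR, which must name one of the entries in
   the aarch64_abis table ("ilp32" or "lp64").  Return 1 on success,
   0 on failure.  */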
8041 static int
8042 aarch64_parse_abi (const char *str)
8043 {
8044 unsigned int i;
8045
8046 if (str[0] == '\0')
8047 {
8048 as_bad (_("missing abi name `%s'"), str);
8049 return 0;
8050 }
8051
8052 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8053 if (strcmp (str, aarch64_abis[i].name) == 0)
8054 {
8055 aarch64_abi = aarch64_abis[i].value;
8056 return 1;
8057 }
8058
8059 as_bad (_("unknown abi `%s'\n"), str);
8060 return 0;
8061 }
8062
8063 static struct aarch64_long_option_table aarch64_long_opts[] = {
8064 #ifdef OBJ_ELF
8065 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
8066 aarch64_parse_abi, NULL},
8067 #endif /* OBJ_ELF */
8068 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
8069 aarch64_parse_cpu, NULL},
8070 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
8071 aarch64_parse_arch, NULL},
8072 {NULL, NULL, 0, NULL}
8073 };
8074
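/* Handle the command-line option C with argument ARG.  The short options
   are declared via md_shortopts above, so "-mcpu=cortex-a53" arrives here
   as C == 'm', ARG == "cpu=cortex-a53".  Return 1 on success, 0
   otherwise.  */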
8075 int
8076 md_parse_option (int c, const char *arg)
8077 {
8078 struct aarch64_option_table *opt;
8079 struct aarch64_long_option_table *lopt;
8080
8081 switch (c)
8082 {
8083 #ifdef OPTION_EB
8084 case OPTION_EB:
8085 target_big_endian = 1;
8086 break;
8087 #endif
8088
8089 #ifdef OPTION_EL
8090 case OPTION_EL:
8091 target_big_endian = 0;
8092 break;
8093 #endif
8094
8095 case 'a':
8096 /* Listing option. Just ignore these, we don't support additional
8097 ones. */
8098 return 0;
8099
8100 default:
8101 for (opt = aarch64_opts; opt->option != NULL; opt++)
8102 {
8103 if (c == opt->option[0]
8104 && ((arg == NULL && opt->option[1] == 0)
8105 || streq (arg, opt->option + 1)))
8106 {
8107 /* If the option is deprecated, tell the user. */
8108 if (opt->deprecated != NULL)
8109 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8110 arg ? arg : "", _(opt->deprecated));
8111
8112 if (opt->var != NULL)
8113 *opt->var = opt->value;
8114
8115 return 1;
8116 }
8117 }
8118
8119 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8120 {
8121 /* These options are expected to have an argument. */
8122 if (c == lopt->option[0]
8123 && arg != NULL
8124 && strncmp (arg, lopt->option + 1,
8125 strlen (lopt->option + 1)) == 0)
8126 {
8127 /* If the option is deprecated, tell the user. */
8128 if (lopt->deprecated != NULL)
8129 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8130 _(lopt->deprecated));
8131
8132 /* Call the sub-option parser. */
8133 return lopt->func (arg + strlen (lopt->option) - 1);
8134 }
8135 }
8136
8137 return 0;
8138 }
8139
8140 return 1;
8141 }
8142
8143 void
8144 md_show_usage (FILE * fp)
8145 {
8146 struct aarch64_option_table *opt;
8147 struct aarch64_long_option_table *lopt;
8148
8149 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8150
8151 for (opt = aarch64_opts; opt->option != NULL; opt++)
8152 if (opt->help != NULL)
8153 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8154
8155 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8156 if (lopt->help != NULL)
8157 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8158
8159 #ifdef OPTION_EB
8160 fprintf (fp, _("\
8161 -EB assemble code for a big-endian cpu\n"));
8162 #endif
8163
8164 #ifdef OPTION_EL
8165 fprintf (fp, _("\
8166 -EL assemble code for a little-endian cpu\n"));
8167 #endif
8168 }
8169
8170 /* Parse a .cpu directive. */
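/* The operand is a CPU name optionally followed by extensions, e.g.:
       .cpu cortex-a57+crypto  */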
8171
8172 static void
8173 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8174 {
8175 const struct aarch64_cpu_option_table *opt;
8176 char saved_char;
8177 char *name;
8178 char *ext;
8179 size_t optlen;
8180
8181 name = input_line_pointer;
8182 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8183 input_line_pointer++;
8184 saved_char = *input_line_pointer;
8185 *input_line_pointer = 0;
8186
8187 ext = strchr (name, '+');
8188
8189 if (ext != NULL)
8190 optlen = ext - name;
8191 else
8192 optlen = strlen (name);
8193
8194 /* Skip the first "all" entry. */
8195 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8196 if (strlen (opt->name) == optlen
8197 && strncmp (name, opt->name, optlen) == 0)
8198 {
8199 mcpu_cpu_opt = &opt->value;
8200 if (ext != NULL)
8201 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8202 return;
8203
8204 cpu_variant = *mcpu_cpu_opt;
8205
8206 *input_line_pointer = saved_char;
8207 demand_empty_rest_of_line ();
8208 return;
8209 }
8210 as_bad (_("unknown cpu `%s'"), name);
8211 *input_line_pointer = saved_char;
8212 ignore_rest_of_line ();
8213 }
8214
8215
8216 /* Parse a .arch directive. */
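/* The operand is an architecture name optionally followed by extensions,
   e.g.:
       .arch armv8.2-a+fp16  */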
8217
8218 static void
8219 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8220 {
8221 const struct aarch64_arch_option_table *opt;
8222 char saved_char;
8223 char *name;
8224 char *ext;
8225 size_t optlen;
8226
8227 name = input_line_pointer;
8228 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8229 input_line_pointer++;
8230 saved_char = *input_line_pointer;
8231 *input_line_pointer = 0;
8232
8233 ext = strchr (name, '+');
8234
8235 if (ext != NULL)
8236 optlen = ext - name;
8237 else
8238 optlen = strlen (name);
8239
8240 /* Skip the first "all" entry. */
8241 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8242 if (strlen (opt->name) == optlen
8243 && strncmp (name, opt->name, optlen) == 0)
8244 {
8245 mcpu_cpu_opt = &opt->value;
8246 if (ext != NULL)
8247 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8248 return;
8249
8250 cpu_variant = *mcpu_cpu_opt;
8251
8252 *input_line_pointer = saved_char;
8253 demand_empty_rest_of_line ();
8254 return;
8255 }
8256
8257 as_bad (_("unknown architecture `%s'\n"), name);
8258 *input_line_pointer = saved_char;
8259 ignore_rest_of_line ();
8260 }
8261
8262 /* Parse a .arch_extension directive. */
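/* The operand is a single extension name, optionally prefixed with "no" to
   remove it, e.g.:
       .arch_extension crypto
       .arch_extension nofp16  */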
8263
8264 static void
8265 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8266 {
8267 char saved_char;
8268 char *ext = input_line_pointer;
8269
8270 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8271 input_line_pointer++;
8272 saved_char = *input_line_pointer;
8273 *input_line_pointer = 0;
8274
8275 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8276 return;
8277
8278 cpu_variant = *mcpu_cpu_opt;
8279
8280 *input_line_pointer = saved_char;
8281 demand_empty_rest_of_line ();
8282 }
8283
8284 /* Copy symbol information. */
8285
8286 void
8287 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8288 {
8289 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8290 }