1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012, 2013
4 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GAS.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the license, or
12 (at your option) any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING3. If not,
21 see <http://www.gnu.org/licenses/>. */
22
23 #include "as.h"
24 #include <limits.h>
25 #include <stdarg.h>
26 #include "bfd_stdint.h"
27 #define NO_RELOC 0
28 #include "safe-ctype.h"
29 #include "subsegs.h"
30 #include "obstack.h"
31
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36
37 #include "dwarf2dbg.h"
38
39 /* Types of processor to assemble for. */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43
44 #define streq(a, b) (strcmp (a, b) == 0)
45
46 static aarch64_feature_set cpu_variant;
47
48 /* Variables that we set while parsing command-line options. Once all
49 options have been read we re-process these values to set the real
50 assembly flags. */
51 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
52 static const aarch64_feature_set *march_cpu_opt = NULL;
53
54 /* Constants for known architecture features. */
55 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
56
57 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
58 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
59
60 #ifdef OBJ_ELF
61 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
62 static symbolS *GOT_symbol;
63
64 /* Which ABI to use. */
65 enum aarch64_abi_type
66 {
67 AARCH64_ABI_LP64 = 0,
68 AARCH64_ABI_ILP32 = 1
69 };
70
71 /* AArch64 ABI for the output file. */
72 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
73
74 /* When non-zero, program to a 32-bit model, in which the C data types
75 int, long and all pointer types are 32-bit objects (ILP32); or to a
76 64-bit model, in which the C int type is 32-bits but the C long type
77 and all pointer types are 64-bit objects (LP64). */
78 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
79 #endif
80
81 enum neon_el_type
82 {
83 NT_invtype = -1,
84 NT_b,
85 NT_h,
86 NT_s,
87 NT_d,
88 NT_q
89 };
90
91 /* Bits for DEFINED field in neon_type_el. */
92 #define NTA_HASTYPE 1
93 #define NTA_HASINDEX 2
94
95 struct neon_type_el
96 {
97 enum neon_el_type type;
98 unsigned char defined;
99 unsigned width;
100 int64_t index;
101 };
102
103 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
104
105 struct reloc
106 {
107 bfd_reloc_code_real_type type;
108 expressionS exp;
109 int pc_rel;
110 enum aarch64_opnd opnd;
111 uint32_t flags;
112 unsigned need_libopcodes_p : 1;
113 };
114
115 struct aarch64_instruction
116 {
117 /* libopcodes structure for instruction intermediate representation. */
118 aarch64_inst base;
119 /* Record assembly errors found during the parsing. */
120 struct
121 {
122 enum aarch64_operand_error_kind kind;
123 const char *error;
124 } parsing_error;
125 /* The condition that appears in the assembly line. */
126 int cond;
127 /* Relocation information (including the GAS internal fixup). */
128 struct reloc reloc;
129 /* Need to generate an immediate in the literal pool. */
130 unsigned gen_lit_pool : 1;
131 };
132
133 typedef struct aarch64_instruction aarch64_instruction;
134
135 static aarch64_instruction inst;
136
137 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
138 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
139
140 /* Diagnostics inline function utilities.
141
142 These are lightweight utilities which should only be called by parse_operands
143 and other parsers. GAS processes each assembly line by parsing it against
144 instruction template(s); in the case of multiple templates (for the same
145 mnemonic name), those templates are tried one by one until one succeeds or
146 all fail. An assembly line may fail a few templates before being
147 successfully parsed; an error saved here in most cases is not a user error
148 but an error indicating the current template is not the right template.
149 Therefore it is very important that errors can be saved at a low cost during
150 the parsing; we don't want to slow down the whole parsing by recording
151 non-user errors in detail.
152
153 Remember that the objective is to help GAS pick up the most appropriate
154 error message in the case of multiple templates, e.g. FMOV which has 8
155 templates. */
156
157 static inline void
158 clear_error (void)
159 {
160 inst.parsing_error.kind = AARCH64_OPDE_NIL;
161 inst.parsing_error.error = NULL;
162 }
163
164 static inline bfd_boolean
165 error_p (void)
166 {
167 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
168 }
169
170 static inline const char *
171 get_error_message (void)
172 {
173 return inst.parsing_error.error;
174 }
175
176 static inline void
177 set_error_message (const char *error)
178 {
179 inst.parsing_error.error = error;
180 }
181
182 static inline enum aarch64_operand_error_kind
183 get_error_kind (void)
184 {
185 return inst.parsing_error.kind;
186 }
187
188 static inline void
189 set_error_kind (enum aarch64_operand_error_kind kind)
190 {
191 inst.parsing_error.kind = kind;
192 }
193
194 static inline void
195 set_error (enum aarch64_operand_error_kind kind, const char *error)
196 {
197 inst.parsing_error.kind = kind;
198 inst.parsing_error.error = error;
199 }
200
201 static inline void
202 set_recoverable_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_RECOVERABLE, error);
205 }
206
207 /* Use the DESC field of the corresponding aarch64_operand entry to compose
208 the error message. */
209 static inline void
210 set_default_error (void)
211 {
212 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
213 }
214
215 static inline void
216 set_syntax_error (const char *error)
217 {
218 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
219 }
220
221 static inline void
222 set_first_syntax_error (const char *error)
223 {
224 if (! error_p ())
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_fatal_syntax_error (const char *error)
230 {
231 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
232 }
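
/* Editorial sketch (not part of the original source): operand parsers are
   expected to use the helpers above roughly as follows, so that only the
   first and most meaningful diagnostic for an assembly line survives.
   parse_some_operand is a hypothetical parser used purely for illustration:

     clear_error ();
     if (! parse_some_operand (&str))
       set_default_error ();        -- cheap, template-mismatch diagnostic

   and once every template has been tried, the driver checks error_p () and
   reports get_error_message () through as_bad.  set_first_syntax_error
   keeps the earliest (usually most meaningful) message.  */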
233 \f
234 /* Number of littlenums required to hold an extended precision number. */
235 #define MAX_LITTLENUMS 6
236
237 /* Return value for certain parsers when the parsing fails; those parsers
238 return the information of the parsed result, e.g. register number, on
239 success. */
240 #define PARSE_FAIL -1
241
242 /* This is an invalid condition code that means no conditional field is
243 present. */
244 #define COND_ALWAYS 0x10
245
246 typedef struct
247 {
248 const char *template;
249 unsigned long value;
250 } asm_barrier_opt;
251
252 typedef struct
253 {
254 const char *template;
255 uint32_t value;
256 } asm_nzcv;
257
258 struct reloc_entry
259 {
260 char *name;
261 bfd_reloc_code_real_type reloc;
262 };
263
264 /* Structure for a hash table entry for a register. */
265 typedef struct
266 {
267 const char *name;
268 unsigned char number;
269 unsigned char type;
270 unsigned char builtin;
271 } reg_entry;
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(CN) /* c[0-7] */ \
290 BASIC_REG_TYPE(VN) /* v[0-31] */ \
291 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
292 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
293 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
294 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
296 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
297 /* Typecheck: any [BHSDQ]P FP. */ \
298 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
299 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
300 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
301 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
303 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
304 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
305 /* Any integer register; used for error messages only. */ \
306 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Pseudo type to mark the end of the enumerator sequence. */ \
310 BASIC_REG_TYPE(MAX)
311
312 #undef BASIC_REG_TYPE
313 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
316
317 /* Register type enumerators. */
318 typedef enum
319 {
320 /* A list of REG_TYPE_*. */
321 AARCH64_REG_TYPES
322 } aarch64_reg_type;
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
326 #undef REG_TYPE
327 #define REG_TYPE(T) (1 << REG_TYPE_##T)
328 #undef MULTI_REG_TYPE
329 #define MULTI_REG_TYPE(T,V) V,
330
331 /* Values indexed by aarch64_reg_type to assist the type checking. */
332 static const unsigned reg_type_masks[] =
333 {
334 AARCH64_REG_TYPES
335 };
336
337 #undef BASIC_REG_TYPE
338 #undef REG_TYPE
339 #undef MULTI_REG_TYPE
340 #undef AARCH64_REG_TYPES
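
/* Editorial note: AARCH64_REG_TYPES above is expanded twice, first to build
   the aarch64_reg_type enumerators and then to build the reg_type_masks[]
   table.  For example (illustrative only), the R_Z_SP entry produces the
   enumerator REG_TYPE_R_Z_SP together with the mask

     (1 << REG_TYPE_R_32) | (1 << REG_TYPE_R_64)
     | (1 << REG_TYPE_SP_32) | (1 << REG_TYPE_SP_64)
     | (1 << REG_TYPE_Z_32) | (1 << REG_TYPE_Z_64)

   so reg_type_masks[REG_TYPE_R_Z_SP] lists every basic register kind that
   satisfies the R_Z_SP type check.  */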
341
342 /* Diagnostics used when we don't get a register of the expected type.
343 Note: this has to be synchronized with aarch64_reg_type definitions
344 above. */
345 static const char *
346 get_reg_expected_msg (aarch64_reg_type reg_type)
347 {
348 const char *msg;
349
350 switch (reg_type)
351 {
352 case REG_TYPE_R_32:
353 msg = N_("integer 32-bit register expected");
354 break;
355 case REG_TYPE_R_64:
356 msg = N_("integer 64-bit register expected");
357 break;
358 case REG_TYPE_R_N:
359 msg = N_("integer register expected");
360 break;
361 case REG_TYPE_R_Z_SP:
362 msg = N_("integer, zero or SP register expected");
363 break;
364 case REG_TYPE_FP_B:
365 msg = N_("8-bit SIMD scalar register expected");
366 break;
367 case REG_TYPE_FP_H:
368 msg = N_("16-bit SIMD scalar or floating-point half precision "
369 "register expected");
370 break;
371 case REG_TYPE_FP_S:
372 msg = N_("32-bit SIMD scalar or floating-point single precision "
373 "register expected");
374 break;
375 case REG_TYPE_FP_D:
376 msg = N_("64-bit SIMD scalar or floating-point double precision "
377 "register expected");
378 break;
379 case REG_TYPE_FP_Q:
380 msg = N_("128-bit SIMD scalar or floating-point quad precision "
381 "register expected");
382 break;
383 case REG_TYPE_CN:
384 msg = N_("C0 - C15 expected");
385 break;
386 case REG_TYPE_R_Z_BHSDQ_V:
387 msg = N_("register expected");
388 break;
389 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
390 msg = N_("SIMD scalar or floating-point register expected");
391 break;
392 case REG_TYPE_VN: /* any V reg */
393 msg = N_("vector register expected");
394 break;
395 default:
396 as_fatal (_("invalid register type %d"), reg_type);
397 }
398 return msg;
399 }
400
401 /* Some well known registers that we refer to directly elsewhere. */
402 #define REG_SP 31
403
404 /* Instructions take 4 bytes in the object file. */
405 #define INSN_SIZE 4
406
407 /* Define some common error messages. */
408 #define BAD_SP _("SP not allowed here")
409
410 static struct hash_control *aarch64_ops_hsh;
411 static struct hash_control *aarch64_cond_hsh;
412 static struct hash_control *aarch64_shift_hsh;
413 static struct hash_control *aarch64_sys_regs_hsh;
414 static struct hash_control *aarch64_pstatefield_hsh;
415 static struct hash_control *aarch64_sys_regs_ic_hsh;
416 static struct hash_control *aarch64_sys_regs_dc_hsh;
417 static struct hash_control *aarch64_sys_regs_at_hsh;
418 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
419 static struct hash_control *aarch64_reg_hsh;
420 static struct hash_control *aarch64_barrier_opt_hsh;
421 static struct hash_control *aarch64_nzcv_hsh;
422 static struct hash_control *aarch64_pldop_hsh;
423
424 /* Stuff needed to resolve the label ambiguity
425 As:
426 ...
427 label: <insn>
428 may differ from:
429 ...
430 label:
431 <insn> */
432
433 static symbolS *last_label_seen;
434
435 /* Literal pool structure. Held on a per-section
436 and per-sub-section basis. */
437
438 #define MAX_LITERAL_POOL_SIZE 1024
439 typedef struct literal_pool
440 {
441 expressionS literals[MAX_LITERAL_POOL_SIZE];
442 unsigned int next_free_entry;
443 unsigned int id;
444 symbolS *symbol;
445 segT section;
446 subsegT sub_section;
447 int size;
448 struct literal_pool *next;
449 } literal_pool;
450
451 /* Pointer to a linked list of literal pools. */
452 static literal_pool *list_of_pools = NULL;
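
/* Editorial illustration (not part of the original source): a pool collects
   literal data, e.g. the 64-bit constant behind a programmer-friendly load
   such as

       ldr   x0, =0x1122334455667788
       ...
       .ltorg

   Each (section, subsection, literal size) combination gets its own pool;
   an .ltorg or .pool directive emits the accumulated literals at that point
   (see s_ltorg below).  */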
453 \f
454 /* Pure syntax. */
455
456 /* This array holds the chars that always start a comment. If the
457 pre-processor is disabled, these aren't very useful. */
458 const char comment_chars[] = "";
459
460 /* This array holds the chars that only start a comment at the beginning of
461 a line. If the line seems to have the form '# 123 filename'
462 .line and .file directives will appear in the pre-processed output. */
463 /* Note that input_file.c hand checks for '#' at the beginning of the
464 first line of the input file. This is because the compiler outputs
465 #NO_APP at the beginning of its output. */
466 /* Also note that comments like this one will always work. */
467 const char line_comment_chars[] = "#";
468
469 const char line_separator_chars[] = ";";
470
471 /* Chars that can be used to separate the mantissa
472 from the exponent in floating point numbers. */
473 const char EXP_CHARS[] = "eE";
474
475 /* Chars that mean this number is a floating point constant. */
476 /* As in 0f12.456 */
477 /* or 0d1.2345e12 */
478
479 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
480
481 /* Prefix character that indicates the start of an immediate value. */
482 #define is_immediate_prefix(C) ((C) == '#')
483
484 /* Separator character handling. */
485
486 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
487
488 static inline bfd_boolean
489 skip_past_char (char **str, char c)
490 {
491 if (**str == c)
492 {
493 (*str)++;
494 return TRUE;
495 }
496 else
497 return FALSE;
498 }
499
500 #define skip_past_comma(str) skip_past_char (str, ',')
501
502 /* Arithmetic expressions (possibly involving symbols). */
503
504 static bfd_boolean in_my_get_expression_p = FALSE;
505
506 /* Third argument to my_get_expression. */
507 #define GE_NO_PREFIX 0
508 #define GE_OPT_PREFIX 1
509
510 /* Return TRUE if the string pointed to by *STR is successfully parsed
511 as a valid expression; *EP will be filled with the information of
512 such an expression. Otherwise return FALSE. */
513
514 static bfd_boolean
515 my_get_expression (expressionS * ep, char **str, int prefix_mode,
516 int reject_absent)
517 {
518 char *save_in;
519 segT seg;
520 int prefix_present_p = 0;
521
522 switch (prefix_mode)
523 {
524 case GE_NO_PREFIX:
525 break;
526 case GE_OPT_PREFIX:
527 if (is_immediate_prefix (**str))
528 {
529 (*str)++;
530 prefix_present_p = 1;
531 }
532 break;
533 default:
534 abort ();
535 }
536
537 memset (ep, 0, sizeof (expressionS));
538
539 save_in = input_line_pointer;
540 input_line_pointer = *str;
541 in_my_get_expression_p = TRUE;
542 seg = expression (ep);
543 in_my_get_expression_p = FALSE;
544
545 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
546 {
547 /* We found a bad expression in md_operand(). */
548 *str = input_line_pointer;
549 input_line_pointer = save_in;
550 if (prefix_present_p && ! error_p ())
551 set_fatal_syntax_error (_("bad expression"));
552 else
553 set_first_syntax_error (_("bad expression"));
554 return FALSE;
555 }
556
557 #ifdef OBJ_AOUT
558 if (seg != absolute_section
559 && seg != text_section
560 && seg != data_section
561 && seg != bss_section && seg != undefined_section)
562 {
563 set_syntax_error (_("bad segment"));
564 *str = input_line_pointer;
565 input_line_pointer = save_in;
566 return FALSE;
567 }
568 #else
569 (void) seg;
570 #endif
571
572 *str = input_line_pointer;
573 input_line_pointer = save_in;
574 return TRUE;
575 }
576
577 /* Turn a string in input_line_pointer into a floating point constant
578 of type TYPE, and store the appropriate bytes in *LITP. The number
579 of LITTLENUMS emitted is stored in *SIZEP. An error message is
580 returned, or NULL on OK. */
581
582 char *
583 md_atof (int type, char *litP, int *sizeP)
584 {
585 return ieee_md_atof (type, litP, sizeP, target_big_endian);
586 }
587
588 /* We handle all bad expressions here, so that we can report the faulty
589 instruction in the error message. */
590 void
591 md_operand (expressionS * exp)
592 {
593 if (in_my_get_expression_p)
594 exp->X_op = O_illegal;
595 }
596
597 /* Immediate values. */
598
599 /* Errors may be set multiple times during parsing or bit encoding
600 (particularly in the Neon bits), but usually the earliest error which is set
601 will be the most meaningful. Avoid overwriting it with later (cascading)
602 errors by calling this function. */
603
604 static void
605 first_error (const char *error)
606 {
607 if (! error_p ())
608 set_syntax_error (error);
609 }
610
611 /* Similar to first_error, but this function accepts a formatted error
612 message. */
613 static void
614 first_error_fmt (const char *format, ...)
615 {
616 va_list args;
617 enum
618 { size = 100 };
619 /* N.B. this single buffer will not cause error messages for different
620 instructions to pollute each other; this is because at the end of
621 processing of each assembly line, the error message, if any, will be
622 collected by as_bad. */
623 static char buffer[size];
624
625 if (! error_p ())
626 {
627 int ret ATTRIBUTE_UNUSED;
628 va_start (args, format);
629 ret = vsnprintf (buffer, size, format, args);
630 know (ret <= size - 1 && ret >= 0);
631 va_end (args);
632 set_syntax_error (buffer);
633 }
634 }
635
636 /* Register parsing. */
637
638 /* Generic register parser which is called by other specialized
639 register parsers.
640 CCP points to what should be the beginning of a register name.
641 If it is indeed a valid register name, advance CCP over it and
642 return the reg_entry structure; otherwise return NULL.
643 It does not issue diagnostics. */
644
645 static reg_entry *
646 parse_reg (char **ccp)
647 {
648 char *start = *ccp;
649 char *p;
650 reg_entry *reg;
651
652 #ifdef REGISTER_PREFIX
653 if (*start != REGISTER_PREFIX)
654 return NULL;
655 start++;
656 #endif
657
658 p = start;
659 if (!ISALPHA (*p) || !is_name_beginner (*p))
660 return NULL;
661
662 do
663 p++;
664 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
665
666 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
667
668 if (!reg)
669 return NULL;
670
671 *ccp = p;
672 return reg;
673 }
674
675 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
676 otherwise return FALSE. */
677 static bfd_boolean
678 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
679 {
680 if (reg->type == type)
681 return TRUE;
682
683 switch (type)
684 {
685 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
686 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
687 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
688 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
689 case REG_TYPE_VN: /* Vector register. */
690 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
691 return ((reg_type_masks[reg->type] & reg_type_masks[type])
692 == reg_type_masks[reg->type]);
693 default:
694 as_fatal ("unhandled type %d", type);
695 abort ();
696 }
697 }
698
699 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
700 Return the register number otherwise. *ISREG32 is set to one if the
701 register is 32-bit wide; *ISREGZERO is set to one if the register is
702 of type Z_32 or Z_64.
703 Note that this function does not issue any diagnostics. */
704
705 static int
706 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
707 int *isreg32, int *isregzero)
708 {
709 char *str = *ccp;
710 const reg_entry *reg = parse_reg (&str);
711
712 if (reg == NULL)
713 return PARSE_FAIL;
714
715 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
716 return PARSE_FAIL;
717
718 switch (reg->type)
719 {
720 case REG_TYPE_SP_32:
721 case REG_TYPE_SP_64:
722 if (reject_sp)
723 return PARSE_FAIL;
724 *isreg32 = reg->type == REG_TYPE_SP_32;
725 *isregzero = 0;
726 break;
727 case REG_TYPE_R_32:
728 case REG_TYPE_R_64:
729 *isreg32 = reg->type == REG_TYPE_R_32;
730 *isregzero = 0;
731 break;
732 case REG_TYPE_Z_32:
733 case REG_TYPE_Z_64:
734 if (reject_rz)
735 return PARSE_FAIL;
736 *isreg32 = reg->type == REG_TYPE_Z_32;
737 *isregzero = 1;
738 break;
739 default:
740 return PARSE_FAIL;
741 }
742
743 *ccp = str;
744
745 return reg->number;
746 }
747
748 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
749 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
750 otherwise return FALSE.
751
752 Accept only one occurrence of:
753 8b 16b 4h 8h 2s 4s 1d 2d
754 b h s d q */
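
/* For example (editorial note): ".4s" parses to type == NT_s, width == 4,
   i.e. four 32-bit elements making a 128-bit vector, while ".s" parses to
   type == NT_s, width == 0, which callers treat as an element qualifier
   that must normally be followed by an index.  */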
755 static bfd_boolean
756 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
757 {
758 char *ptr = *str;
759 unsigned width;
760 unsigned element_size;
761 enum neon_el_type type;
762
763 /* skip '.' */
764 ptr++;
765
766 if (!ISDIGIT (*ptr))
767 {
768 width = 0;
769 goto elt_size;
770 }
771 width = strtoul (ptr, &ptr, 10);
772 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
773 {
774 first_error_fmt (_("bad size %d in vector width specifier"), width);
775 return FALSE;
776 }
777
778 elt_size:
779 switch (TOLOWER (*ptr))
780 {
781 case 'b':
782 type = NT_b;
783 element_size = 8;
784 break;
785 case 'h':
786 type = NT_h;
787 element_size = 16;
788 break;
789 case 's':
790 type = NT_s;
791 element_size = 32;
792 break;
793 case 'd':
794 type = NT_d;
795 element_size = 64;
796 break;
797 case 'q':
798 if (width == 1)
799 {
800 type = NT_q;
801 element_size = 128;
802 break;
803 }
804 /* fall through. */
805 default:
806 if (*ptr != '\0')
807 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
808 else
809 first_error (_("missing element size"));
810 return FALSE;
811 }
812 if (width != 0 && width * element_size != 64 && width * element_size != 128)
813 {
814 first_error_fmt (_
815 ("invalid element size %d and vector size combination %c"),
816 width, *ptr);
817 return FALSE;
818 }
819 ptr++;
820
821 parsed_type->type = type;
822 parsed_type->width = width;
823
824 *str = ptr;
825
826 return TRUE;
827 }
828
829 /* Parse a single type, e.g. ".8b", leading period included.
830 Only applicable to Vn registers.
831
832 Return TRUE on success; otherwise return FALSE. */
833 static bfd_boolean
834 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
835 {
836 char *str = *ccp;
837
838 if (*str == '.')
839 {
840 if (! parse_neon_type_for_operand (vectype, &str))
841 {
842 first_error (_("vector type expected"));
843 return FALSE;
844 }
845 }
846 else
847 return FALSE;
848
849 *ccp = str;
850
851 return TRUE;
852 }
853
854 /* Parse a register of the type TYPE.
855
856 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
857 name or the parsed register is not of TYPE.
858
859 Otherwise return the register number, and optionally fill in the actual
860 type of the register in *RTYPE when multiple alternatives were given, and
861 return the register shape and element index information in *TYPEINFO.
862
863 IN_REG_LIST should be set with TRUE if the caller is parsing a register
864 list. */
865
866 static int
867 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
868 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
869 {
870 char *str = *ccp;
871 const reg_entry *reg = parse_reg (&str);
872 struct neon_type_el atype;
873 struct neon_type_el parsetype;
874 bfd_boolean is_typed_vecreg = FALSE;
875
876 atype.defined = 0;
877 atype.type = NT_invtype;
878 atype.width = -1;
879 atype.index = 0;
880
881 if (reg == NULL)
882 {
883 if (typeinfo)
884 *typeinfo = atype;
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888
889 if (! aarch64_check_reg_type (reg, type))
890 {
891 DEBUG_TRACE ("reg type check failed");
892 set_default_error ();
893 return PARSE_FAIL;
894 }
895 type = reg->type;
896
897 if (type == REG_TYPE_VN
898 && parse_neon_operand_type (&parsetype, &str))
899 {
900 /* Register is of the form Vn.[bhsdq]. */
901 is_typed_vecreg = TRUE;
902
903 if (parsetype.width == 0)
904 /* Expect index. In the new scheme we cannot have
905 Vn.[bhsdq] represent a scalar. Therefore any
906 Vn.[bhsdq] should have an index following it.
907 Except in register lists, of course. */
908 atype.defined |= NTA_HASINDEX;
909 else
910 atype.defined |= NTA_HASTYPE;
911
912 atype.type = parsetype.type;
913 atype.width = parsetype.width;
914 }
915
916 if (skip_past_char (&str, '['))
917 {
918 expressionS exp;
919
920 /* Reject Sn[index] syntax. */
921 if (!is_typed_vecreg)
922 {
923 first_error (_("this type of register can't be indexed"));
924 return PARSE_FAIL;
925 }
926
927 if (in_reg_list == TRUE)
928 {
929 first_error (_("index not allowed inside register list"));
930 return PARSE_FAIL;
931 }
932
933 atype.defined |= NTA_HASINDEX;
934
935 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
936
937 if (exp.X_op != O_constant)
938 {
939 first_error (_("constant expression required"));
940 return PARSE_FAIL;
941 }
942
943 if (! skip_past_char (&str, ']'))
944 return PARSE_FAIL;
945
946 atype.index = exp.X_add_number;
947 }
948 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
949 {
950 /* Indexed vector register expected. */
951 first_error (_("indexed vector register expected"));
952 return PARSE_FAIL;
953 }
954
955 /* A vector reg Vn should be typed or indexed. */
956 if (type == REG_TYPE_VN && atype.defined == 0)
957 {
958 first_error (_("invalid use of vector register"));
959 }
960
961 if (typeinfo)
962 *typeinfo = atype;
963
964 if (rtype)
965 *rtype = type;
966
967 *ccp = str;
968
969 return reg->number;
970 }
971
972 /* Parse register.
973
974 Return the register number on success; return PARSE_FAIL otherwise.
975
976 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
977 the register (e.g. NEON double or quad reg when either has been requested).
978
979 If this is a NEON vector register with additional type information, fill
980 in the struct pointed to by VECTYPE (if non-NULL).
981
982 This parser does not handle register lists. */
983
984 static int
985 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
986 aarch64_reg_type *rtype, struct neon_type_el *vectype)
987 {
988 struct neon_type_el atype;
989 char *str = *ccp;
990 int reg = parse_typed_reg (&str, type, rtype, &atype,
991 /*in_reg_list= */ FALSE);
992
993 if (reg == PARSE_FAIL)
994 return PARSE_FAIL;
995
996 if (vectype)
997 *vectype = atype;
998
999 *ccp = str;
1000
1001 return reg;
1002 }
1003
1004 static inline bfd_boolean
1005 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1006 {
1007 return
1008 e1.type == e2.type
1009 && e1.defined == e2.defined
1010 && e1.width == e2.width && e1.index == e2.index;
1011 }
1012
1013 /* This function parses the NEON register list. On success, it returns
1014 the parsed register list information in the following encoded format:
1015
1016 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1017 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1018
1019 The information of the register shape and/or index is returned in
1020 *VECTYPE.
1021
1022 It returns PARSE_FAIL if the register list is invalid.
1023
1024 The list contains one to four registers.
1025 Each register can be one of:
1026 <Vt>.<T>[<index>]
1027 <Vt>.<T>
1028 All <T> should be identical.
1029 All <index> should be identical.
1030 There are restrictions on <Vt> numbers which are checked later
1031 (by reg_list_valid_p). */
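
/* Worked example (editorial note): for the list {v2.4s - v4.4s} the loop
   below accumulates ret_val = 2 | (3 << 5) | (4 << 10) and the function
   returns (ret_val << 2) | (3 - 1) == 0x418a, i.e. register numbers 2, 3
   and 4 in successive 5-bit fields and the number of registers minus one
   in bits 0-1; the shape information (.4s) is returned separately in
   *VECTYPE.  */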
1032
1033 static int
1034 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1035 {
1036 char *str = *ccp;
1037 int nb_regs;
1038 struct neon_type_el typeinfo, typeinfo_first;
1039 int val, val_range;
1040 int in_range;
1041 int ret_val;
1042 int i;
1043 bfd_boolean error = FALSE;
1044 bfd_boolean expect_index = FALSE;
1045
1046 if (*str != '{')
1047 {
1048 set_syntax_error (_("expecting {"));
1049 return PARSE_FAIL;
1050 }
1051 str++;
1052
1053 nb_regs = 0;
1054 typeinfo_first.defined = 0;
1055 typeinfo_first.type = NT_invtype;
1056 typeinfo_first.width = -1;
1057 typeinfo_first.index = 0;
1058 ret_val = 0;
1059 val = -1;
1060 val_range = -1;
1061 in_range = 0;
1062 do
1063 {
1064 if (in_range)
1065 {
1066 str++; /* skip over '-' */
1067 val_range = val;
1068 }
1069 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1070 /*in_reg_list= */ TRUE);
1071 if (val == PARSE_FAIL)
1072 {
1073 set_first_syntax_error (_("invalid vector register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077 /* reject [bhsd]n */
1078 if (typeinfo.defined == 0)
1079 {
1080 set_first_syntax_error (_("invalid scalar register in list"));
1081 error = TRUE;
1082 continue;
1083 }
1084
1085 if (typeinfo.defined & NTA_HASINDEX)
1086 expect_index = TRUE;
1087
1088 if (in_range)
1089 {
1090 if (val < val_range)
1091 {
1092 set_first_syntax_error
1093 (_("invalid range in vector register list"));
1094 error = TRUE;
1095 }
1096 val_range++;
1097 }
1098 else
1099 {
1100 val_range = val;
1101 if (nb_regs == 0)
1102 typeinfo_first = typeinfo;
1103 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1104 {
1105 set_first_syntax_error
1106 (_("type mismatch in vector register list"));
1107 error = TRUE;
1108 }
1109 }
1110 if (! error)
1111 for (i = val_range; i <= val; i++)
1112 {
1113 ret_val |= i << (5 * nb_regs);
1114 nb_regs++;
1115 }
1116 in_range = 0;
1117 }
1118 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1119
1120 skip_whitespace (str);
1121 if (*str != '}')
1122 {
1123 set_first_syntax_error (_("end of vector register list not found"));
1124 error = TRUE;
1125 }
1126 str++;
1127
1128 skip_whitespace (str);
1129
1130 if (expect_index)
1131 {
1132 if (skip_past_char (&str, '['))
1133 {
1134 expressionS exp;
1135
1136 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1137 if (exp.X_op != O_constant)
1138 {
1139 set_first_syntax_error (_("constant expression required."));
1140 error = TRUE;
1141 }
1142 if (! skip_past_char (&str, ']'))
1143 error = TRUE;
1144 else
1145 typeinfo_first.index = exp.X_add_number;
1146 }
1147 else
1148 {
1149 set_first_syntax_error (_("expected index"));
1150 error = TRUE;
1151 }
1152 }
1153
1154 if (nb_regs > 4)
1155 {
1156 set_first_syntax_error (_("too many registers in vector register list"));
1157 error = TRUE;
1158 }
1159 else if (nb_regs == 0)
1160 {
1161 set_first_syntax_error (_("empty vector register list"));
1162 error = TRUE;
1163 }
1164
1165 *ccp = str;
1166 if (! error)
1167 *vectype = typeinfo_first;
1168
1169 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1170 }
1171
1172 /* Directives: register aliases. */
1173
1174 static reg_entry *
1175 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1176 {
1177 reg_entry *new;
1178 const char *name;
1179
1180 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1181 {
1182 if (new->builtin)
1183 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1184 str);
1185
1186 /* Only warn about a redefinition if it's not defined as the
1187 same register. */
1188 else if (new->number != number || new->type != type)
1189 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1190
1191 return NULL;
1192 }
1193
1194 name = xstrdup (str);
1195 new = xmalloc (sizeof (reg_entry));
1196
1197 new->name = name;
1198 new->number = number;
1199 new->type = type;
1200 new->builtin = FALSE;
1201
1202 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1203 abort ();
1204
1205 return new;
1206 }
1207
1208 /* Look for the .req directive. This is of the form:
1209
1210 new_register_name .req existing_register_name
1211
1212 If we find one, or if it looks sufficiently like one that we want to
1213 handle any error here, return TRUE. Otherwise return FALSE. */
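
/* For example (editorial note), after

       foo .req x0

   the alias "foo" can be used wherever x0 is accepted; the code below also
   tries to register the all-uppercase and all-lowercase variants of the new
   name, unless they collide with an existing definition.  */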
1214
1215 static bfd_boolean
1216 create_register_alias (char *newname, char *p)
1217 {
1218 const reg_entry *old;
1219 char *oldname, *nbuf;
1220 size_t nlen;
1221
1222 /* The input scrubber ensures that whitespace after the mnemonic is
1223 collapsed to single spaces. */
1224 oldname = p;
1225 if (strncmp (oldname, " .req ", 6) != 0)
1226 return FALSE;
1227
1228 oldname += 6;
1229 if (*oldname == '\0')
1230 return FALSE;
1231
1232 old = hash_find (aarch64_reg_hsh, oldname);
1233 if (!old)
1234 {
1235 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1236 return TRUE;
1237 }
1238
1239 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1240 the desired alias name, and p points to its end. If not, then
1241 the desired alias name is in the global original_case_string. */
1242 #ifdef TC_CASE_SENSITIVE
1243 nlen = p - newname;
1244 #else
1245 newname = original_case_string;
1246 nlen = strlen (newname);
1247 #endif
1248
1249 nbuf = alloca (nlen + 1);
1250 memcpy (nbuf, newname, nlen);
1251 nbuf[nlen] = '\0';
1252
1253 /* Create aliases under the new name as stated; an all-lowercase
1254 version of the new name; and an all-uppercase version of the new
1255 name. */
1256 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1257 {
1258 for (p = nbuf; *p; p++)
1259 *p = TOUPPER (*p);
1260
1261 if (strncmp (nbuf, newname, nlen))
1262 {
1263 /* If this attempt to create an additional alias fails, do not bother
1264 trying to create the all-lower case alias. We will fail and issue
1265 a second, duplicate error message. This situation arises when the
1266 programmer does something like:
1267 foo .req r0
1268 Foo .req r1
1269 The second .req creates the "Foo" alias but then fails to create
1270 the artificial FOO alias because it has already been created by the
1271 first .req. */
1272 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1273 return TRUE;
1274 }
1275
1276 for (p = nbuf; *p; p++)
1277 *p = TOLOWER (*p);
1278
1279 if (strncmp (nbuf, newname, nlen))
1280 insert_reg_alias (nbuf, old->number, old->type);
1281 }
1282
1283 return TRUE;
1284 }
1285
1286 /* Should never be called, as .req goes between the alias and the
1287 register name, not at the beginning of the line. */
1288 static void
1289 s_req (int a ATTRIBUTE_UNUSED)
1290 {
1291 as_bad (_("invalid syntax for .req directive"));
1292 }
1293
1294 /* The .unreq directive deletes an alias which was previously defined
1295 by .req. For example:
1296
1297 my_alias .req r11
1298 .unreq my_alias */
1299
1300 static void
1301 s_unreq (int a ATTRIBUTE_UNUSED)
1302 {
1303 char *name;
1304 char saved_char;
1305
1306 name = input_line_pointer;
1307
1308 while (*input_line_pointer != 0
1309 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1310 ++input_line_pointer;
1311
1312 saved_char = *input_line_pointer;
1313 *input_line_pointer = 0;
1314
1315 if (!*name)
1316 as_bad (_("invalid syntax for .unreq directive"));
1317 else
1318 {
1319 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1320
1321 if (!reg)
1322 as_bad (_("unknown register alias '%s'"), name);
1323 else if (reg->builtin)
1324 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1325 name);
1326 else
1327 {
1328 char *p;
1329 char *nbuf;
1330
1331 hash_delete (aarch64_reg_hsh, name, FALSE);
1332 free ((char *) reg->name);
1333 free (reg);
1334
1335 /* Also locate the all upper case and all lower case versions.
1336 Do not complain if we cannot find one or the other as it
1337 was probably deleted above. */
1338
1339 nbuf = strdup (name);
1340 for (p = nbuf; *p; p++)
1341 *p = TOUPPER (*p);
1342 reg = hash_find (aarch64_reg_hsh, nbuf);
1343 if (reg)
1344 {
1345 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1346 free ((char *) reg->name);
1347 free (reg);
1348 }
1349
1350 for (p = nbuf; *p; p++)
1351 *p = TOLOWER (*p);
1352 reg = hash_find (aarch64_reg_hsh, nbuf);
1353 if (reg)
1354 {
1355 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1356 free ((char *) reg->name);
1357 free (reg);
1358 }
1359
1360 free (nbuf);
1361 }
1362 }
1363
1364 *input_line_pointer = saved_char;
1365 demand_empty_rest_of_line ();
1366 }
1367
1368 /* Directives: Instruction set selection. */
1369
1370 #ifdef OBJ_ELF
1371 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1372 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1373 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1374 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
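
/* Editorial illustration (not part of the original source): for input like

       .text
       add   x0, x1, x2
       .word 0x11223344

   a "$x" mapping symbol is placed at the address of the ADD instruction and
   a "$d" symbol at the address of the data word, letting disassemblers and
   other tools distinguish code from data.  */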
1375
1376 /* Create a new mapping symbol for the transition to STATE. */
1377
1378 static void
1379 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1380 {
1381 symbolS *symbolP;
1382 const char *symname;
1383 int type;
1384
1385 switch (state)
1386 {
1387 case MAP_DATA:
1388 symname = "$d";
1389 type = BSF_NO_FLAGS;
1390 break;
1391 case MAP_INSN:
1392 symname = "$x";
1393 type = BSF_NO_FLAGS;
1394 break;
1395 default:
1396 abort ();
1397 }
1398
1399 symbolP = symbol_new (symname, now_seg, value, frag);
1400 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1401
1402 /* Save the mapping symbols for future reference. Also check that
1403 we do not place two mapping symbols at the same offset within a
1404 frag. We'll handle overlap between frags in
1405 check_mapping_symbols.
1406
1407 If .fill or other data filling directive generates zero sized data,
1408 the mapping symbol for the following code will have the same value
1409 as the one generated for the data filling directive. In this case,
1410 we replace the old symbol with the new one at the same address. */
1411 if (value == 0)
1412 {
1413 if (frag->tc_frag_data.first_map != NULL)
1414 {
1415 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1416 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1417 &symbol_lastP);
1418 }
1419 frag->tc_frag_data.first_map = symbolP;
1420 }
1421 if (frag->tc_frag_data.last_map != NULL)
1422 {
1423 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1424 S_GET_VALUE (symbolP));
1425 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1426 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1427 &symbol_lastP);
1428 }
1429 frag->tc_frag_data.last_map = symbolP;
1430 }
1431
1432 /* We must sometimes convert a region marked as code to data during
1433 code alignment, if an odd number of bytes have to be padded. The
1434 code mapping symbol is pushed to an aligned address. */
1435
1436 static void
1437 insert_data_mapping_symbol (enum mstate state,
1438 valueT value, fragS * frag, offsetT bytes)
1439 {
1440 /* If there was already a mapping symbol, remove it. */
1441 if (frag->tc_frag_data.last_map != NULL
1442 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1443 frag->fr_address + value)
1444 {
1445 symbolS *symp = frag->tc_frag_data.last_map;
1446
1447 if (value == 0)
1448 {
1449 know (frag->tc_frag_data.first_map == symp);
1450 frag->tc_frag_data.first_map = NULL;
1451 }
1452 frag->tc_frag_data.last_map = NULL;
1453 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1454 }
1455
1456 make_mapping_symbol (MAP_DATA, value, frag);
1457 make_mapping_symbol (state, value + bytes, frag);
1458 }
1459
1460 static void mapping_state_2 (enum mstate state, int max_chars);
1461
1462 /* Set the mapping state to STATE. Only call this when about to
1463 emit some STATE bytes to the file. */
1464
1465 void
1466 mapping_state (enum mstate state)
1467 {
1468 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1469
1470 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1471
1472 if (mapstate == state)
1473 /* The mapping symbol has already been emitted.
1474 There is nothing else to do. */
1475 return;
1476 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1477 /* This case will be evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492
1493 mapping_state_2 (state, 0);
1494 #undef TRANSITION
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment; we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 if ((pool->literals[entry].X_op == exp->X_op)
1621 && (exp->X_op == O_constant)
1622 && (pool->literals[entry].X_add_number == exp->X_add_number)
1623 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1624 break;
1625
1626 if ((pool->literals[entry].X_op == exp->X_op)
1627 && (exp->X_op == O_symbol)
1628 && (pool->literals[entry].X_add_number == exp->X_add_number)
1629 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1630 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1631 break;
1632 }
1633
1634 /* Do we need to create a new entry? */
1635 if (entry == pool->next_free_entry)
1636 {
1637 if (entry >= MAX_LITERAL_POOL_SIZE)
1638 {
1639 set_syntax_error (_("literal pool overflow"));
1640 return FALSE;
1641 }
1642
1643 pool->literals[entry] = *exp;
1644 pool->next_free_entry += 1;
1645 }
1646
1647 exp->X_op = O_symbol;
1648 exp->X_add_number = ((int) entry) * size;
1649 exp->X_add_symbol = pool->symbol;
1650
1651 return TRUE;
1652 }
1653
1654 /* Can't use symbol_new here, so have to create a symbol and then at
1655 a later date assign it a value. That's what these functions do. */
1656
1657 static void
1658 symbol_locate (symbolS * symbolP,
1659 const char *name,/* It is copied, the caller can modify. */
1660 segT segment, /* Segment identifier (SEG_<something>). */
1661 valueT valu, /* Symbol value. */
1662 fragS * frag) /* Associated fragment. */
1663 {
1664 unsigned int name_length;
1665 char *preserved_copy_of_name;
1666
1667 name_length = strlen (name) + 1; /* +1 for \0. */
1668 obstack_grow (&notes, name, name_length);
1669 preserved_copy_of_name = obstack_finish (&notes);
1670
1671 #ifdef tc_canonicalize_symbol_name
1672 preserved_copy_of_name =
1673 tc_canonicalize_symbol_name (preserved_copy_of_name);
1674 #endif
1675
1676 S_SET_NAME (symbolP, preserved_copy_of_name);
1677
1678 S_SET_SEGMENT (symbolP, segment);
1679 S_SET_VALUE (symbolP, valu);
1680 symbol_clear_list_pointers (symbolP);
1681
1682 symbol_set_frag (symbolP, frag);
1683
1684 /* Link to end of symbol chain. */
1685 {
1686 extern int symbol_table_frozen;
1687
1688 if (symbol_table_frozen)
1689 abort ();
1690 }
1691
1692 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1693
1694 obj_symbol_new_hook (symbolP);
1695
1696 #ifdef tc_symbol_new_hook
1697 tc_symbol_new_hook (symbolP);
1698 #endif
1699
1700 #ifdef DEBUG_SYMS
1701 verify_symbol_chain (symbol_rootP, symbol_lastP);
1702 #endif /* DEBUG_SYMS */
1703 }
1704
1705
1706 static void
1707 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1708 {
1709 unsigned int entry;
1710 literal_pool *pool;
1711 char sym_name[20];
1712 int align;
1713
1714 for (align = 2; align <= 4; align++)
1715 {
1716 int size = 1 << align;
1717
1718 pool = find_literal_pool (size);
1719 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1720 continue;
1721
1722 mapping_state (MAP_DATA);
1723
1724 /* Align pool as you have word accesses.
1725 Only make a frag if we have to. */
1726 if (!need_pass_2)
1727 frag_align (align, 0, 0);
1728
1729 record_alignment (now_seg, align);
1730
1731 sprintf (sym_name, "$$lit_\002%x", pool->id);
1732
1733 symbol_locate (pool->symbol, sym_name, now_seg,
1734 (valueT) frag_now_fix (), frag_now);
1735 symbol_table_insert (pool->symbol);
1736
1737 for (entry = 0; entry < pool->next_free_entry; entry++)
1738 /* First output the expression in the instruction to the pool. */
1739 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1740
1741 /* Mark the pool as empty. */
1742 pool->next_free_entry = 0;
1743 pool->symbol = NULL;
1744 }
1745 }
1746
1747 #ifdef OBJ_ELF
1748 /* Forward declarations for functions below, in the MD interface
1749 section. */
1750 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1751 static struct reloc_table_entry * find_reloc_table_entry (char **);
1752
1753 /* Directives: Data. */
1754 /* N.B. the support for relocation suffix in this directive needs to be
1755 implemented properly. */
1756
1757 static void
1758 s_aarch64_elf_cons (int nbytes)
1759 {
1760 expressionS exp;
1761
1762 #ifdef md_flush_pending_output
1763 md_flush_pending_output ();
1764 #endif
1765
1766 if (is_it_end_of_statement ())
1767 {
1768 demand_empty_rest_of_line ();
1769 return;
1770 }
1771
1772 #ifdef md_cons_align
1773 md_cons_align (nbytes);
1774 #endif
1775
1776 mapping_state (MAP_DATA);
1777 do
1778 {
1779 struct reloc_table_entry *reloc;
1780
1781 expression (&exp);
1782
1783 if (exp.X_op != O_symbol)
1784 emit_expr (&exp, (unsigned int) nbytes);
1785 else
1786 {
1787 skip_past_char (&input_line_pointer, '#');
1788 if (skip_past_char (&input_line_pointer, ':'))
1789 {
1790 reloc = find_reloc_table_entry (&input_line_pointer);
1791 if (reloc == NULL)
1792 as_bad (_("unrecognized relocation suffix"));
1793 else
1794 as_bad (_("unimplemented relocation suffix"));
1795 ignore_rest_of_line ();
1796 return;
1797 }
1798 else
1799 emit_expr (&exp, (unsigned int) nbytes);
1800 }
1801 }
1802 while (*input_line_pointer++ == ',');
1803
1804 /* Put terminator back into stream. */
1805 input_line_pointer--;
1806 demand_empty_rest_of_line ();
1807 }
1808
1809 #endif /* OBJ_ELF */
1810
1811 /* Output a 32-bit word, but mark as an instruction. */
1812
1813 static void
1814 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1815 {
1816 expressionS exp;
1817
1818 #ifdef md_flush_pending_output
1819 md_flush_pending_output ();
1820 #endif
1821
1822 if (is_it_end_of_statement ())
1823 {
1824 demand_empty_rest_of_line ();
1825 return;
1826 }
1827
1828 if (!need_pass_2)
1829 frag_align_code (2, 0);
1830 #ifdef OBJ_ELF
1831 mapping_state (MAP_INSN);
1832 #endif
1833
1834 do
1835 {
1836 expression (&exp);
1837 if (exp.X_op != O_constant)
1838 {
1839 as_bad (_("constant expression required"));
1840 ignore_rest_of_line ();
1841 return;
1842 }
1843
1844 if (target_big_endian)
1845 {
1846 unsigned int val = exp.X_add_number;
1847 exp.X_add_number = SWAP_32 (val);
1848 }
1849 emit_expr (&exp, 4);
1850 }
1851 while (*input_line_pointer++ == ',');
1852
1853 /* Put terminator back into stream. */
1854 input_line_pointer--;
1855 demand_empty_rest_of_line ();
1856 }
1857
1858 #ifdef OBJ_ELF
1859 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1860
1861 static void
1862 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1863 {
1864 expressionS exp;
1865
1866 /* Since we're just labelling the code, there's no need to define a
1867 mapping symbol. */
1868 expression (&exp);
1869 /* Make sure there is enough room in this frag for the following
1870 blr. This trick only works if the blr follows immediately after
1871 the .tlsdesc directive. */
1872 frag_grow (4);
1873 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1874 BFD_RELOC_AARCH64_TLSDESC_CALL);
1875
1876 demand_empty_rest_of_line ();
1877 }
1878 #endif /* OBJ_ELF */
1879
1880 static void s_aarch64_arch (int);
1881 static void s_aarch64_cpu (int);
1882
1883 /* This table describes all the machine specific pseudo-ops the assembler
1884 has to support. The fields are:
1885 pseudo-op name without dot
1886 function to call to execute this pseudo-op
1887 Integer arg to pass to the function. */
1888
1889 const pseudo_typeS md_pseudo_table[] = {
1890 /* Never called because '.req' does not start a line. */
1891 {"req", s_req, 0},
1892 {"unreq", s_unreq, 0},
1893 {"bss", s_bss, 0},
1894 {"even", s_even, 0},
1895 {"ltorg", s_ltorg, 0},
1896 {"pool", s_ltorg, 0},
1897 {"cpu", s_aarch64_cpu, 0},
1898 {"arch", s_aarch64_arch, 0},
1899 {"inst", s_aarch64_inst, 0},
1900 #ifdef OBJ_ELF
1901 {"tlsdesccall", s_tlsdesccall, 0},
1902 {"word", s_aarch64_elf_cons, 4},
1903 {"long", s_aarch64_elf_cons, 4},
1904 {"xword", s_aarch64_elf_cons, 8},
1905 {"dword", s_aarch64_elf_cons, 8},
1906 #endif
1907 {0, 0, 0}
1908 };
1909 \f
1910
1911 /* Check whether STR points to a register name followed by a comma or the
1912 end of line; REG_TYPE indicates which register types are checked
1913 against. Return TRUE if STR is such a register name; otherwise return
1914 FALSE. The function does not intend to produce any diagnostics, but since
1915 the register parser aarch64_reg_parse, which is called by this function,
1916 does produce diagnostics, we call clear_error to clear any diagnostics
1917 that may be generated by aarch64_reg_parse.
1918 Also, the function returns FALSE directly if there is any user error
1919 present at the function entry. This prevents the existing diagnostics
1920 state from being spoiled.
1921 The function currently serves parse_constant_immediate and
1922 parse_big_immediate only. */
1923 static bfd_boolean
1924 reg_name_p (char *str, aarch64_reg_type reg_type)
1925 {
1926 int reg;
1927
1928 /* Prevent the diagnostics state from being spoiled. */
1929 if (error_p ())
1930 return FALSE;
1931
1932 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1933
1934 /* Clear the parsing error that may be set by the reg parser. */
1935 clear_error ();
1936
1937 if (reg == PARSE_FAIL)
1938 return FALSE;
1939
1940 skip_whitespace (str);
1941 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1942 return TRUE;
1943
1944 return FALSE;
1945 }
1946
1947 /* Parser functions used exclusively in instruction operands. */
1948
1949 /* Parse an immediate expression which may not be constant.
1950
1951 To prevent the expression parser from pushing a register name
1952 into the symbol table as an undefined symbol, firstly a check is
1953 done to find out whether STR is a valid register name followed
1954 by a comma or the end of line. Return FALSE if STR is such a
1955 string. */
1956
1957 static bfd_boolean
1958 parse_immediate_expression (char **str, expressionS *exp)
1959 {
1960 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1961 {
1962 set_recoverable_error (_("immediate operand required"));
1963 return FALSE;
1964 }
1965
1966 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1967
1968 if (exp->X_op == O_absent)
1969 {
1970 set_fatal_syntax_error (_("missing immediate expression"));
1971 return FALSE;
1972 }
1973
1974 return TRUE;
1975 }
1976
1977 /* Constant immediate-value read function for use in insn parsing.
1978 STR points to the beginning of the immediate (with the optional
1979 leading #); *VAL receives the value.
1980
1981 Return TRUE on success; otherwise return FALSE. */
1982
1983 static bfd_boolean
1984 parse_constant_immediate (char **str, int64_t * val)
1985 {
1986 expressionS exp;
1987
1988 if (! parse_immediate_expression (str, &exp))
1989 return FALSE;
1990
1991 if (exp.X_op != O_constant)
1992 {
1993 set_syntax_error (_("constant expression required"));
1994 return FALSE;
1995 }
1996
1997 *val = exp.X_add_number;
1998 return TRUE;
1999 }
2000
2001 static uint32_t
2002 encode_imm_float_bits (uint32_t imm)
2003 {
2004 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2005 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2006 }
2007
2008 /* Return TRUE if the single-precision floating-point value encoded in IMM
2009 can be expressed in the AArch64 8-bit signed floating-point format with
2010 3-bit exponent and normalized 4 bits of precision; in other words, the
2011 floating-point value must be expressible as
2012 (+/-) n / 16 * power (2, r)
2013 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
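
/* Worked example (editorial note): 1.0 is 16/16 * 2^0, i.e. n = 16, r = 0.
   Its IEEE754 single-precision encoding is 0x3f800000, which satisfies the
   bit pattern checked below, and encode_imm_float_bits above turns it into
   the 8-bit immediate 0x70.  */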
2014
2015 static bfd_boolean
2016 aarch64_imm_float_p (uint32_t imm)
2017 {
2018 /* If a single-precision floating-point value has the following bit
2019 pattern, it can be expressed in the AArch64 8-bit floating-point
2020 format:
2021
2022 3 32222222 2221111111111
2023 1 09876543 21098765432109876543210
2024 n Eeeeeexx xxxx0000000000000000000
2025
2026 where n, e and each x are either 0 or 1 independently, with
2027 E == ~ e. */
2028
2029 uint32_t pattern;
2030
2031 /* Prepare the pattern for 'Eeeeee'. */
2032 if (((imm >> 30) & 0x1) == 0)
2033 pattern = 0x3e000000;
2034 else
2035 pattern = 0x40000000;
2036
2037 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2038 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2039 }
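/* A worked example (illustrative only, not part of the original source):
   the single-precision value 1.0f is 0x3f800000; its low 19 bits are zero
   and bits 25-29 are the complement of bit 30 (matching pattern
   0x3e000000), so aarch64_imm_float_p returns TRUE, and
   encode_imm_float_bits (0x3f800000) yields the 8-bit immediate 0x70,
   which is the FMOV encoding of +1.0.  */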
2040
2041 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2042
2043 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2044 8-bit signed floating-point format with 3-bit exponent and normalized 4
2045 bits of precision (i.e. can be used in an FMOV instruction); return the
2046 equivalent single-precision encoding in *FPWORD.
2047
2048 Otherwise return FALSE. */
2049
2050 static bfd_boolean
2051 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2052 {
2053 /* If a double-precision floating-point value has the following bit
2054 pattern, it can be expressed in the AArch64 8-bit floating-point
2055 format:
2056
2057 6 66655555555 554444444...21111111111
2058 3 21098765432 109876543...098765432109876543210
2059 n Eeeeeeeeexx xxxx00000...000000000000000000000
2060
2061 where n, e and each x are either 0 or 1 independently, with
2062 E == ~ e. */
2063
2064 uint32_t pattern;
2065 uint32_t high32 = imm >> 32;
2066
2067 /* Lower 32 bits need to be 0s. */
2068 if ((imm & 0xffffffff) != 0)
2069 return FALSE;
2070
2071 /* Prepare the pattern for 'Eeeeeeeee'. */
2072 if (((high32 >> 30) & 0x1) == 0)
2073 pattern = 0x3fc00000;
2074 else
2075 pattern = 0x40000000;
2076
2077 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2078 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2079 {
2080 /* Convert to the single-precision encoding.
2081 i.e. convert
2082 n Eeeeeeeeexx xxxx00000...000000000000000000000
2083 to
2084 n Eeeeeexx xxxx0000000000000000000. */
2085 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2086 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2087 return TRUE;
2088 }
2089 else
2090 return FALSE;
2091 }
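/* A worked example (illustrative only): the double-precision value 1.0 is
   0x3ff0000000000000.  Its low 32 bits are zero; the high word 0x3ff00000
   has its low 16 bits clear and matches the pattern 0x3fc00000, so the
   function returns TRUE with *FPWORD set to 0x3f800000, i.e. 1.0f in the
   single-precision encoding.  */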
2092
2093 /* Parse a floating-point immediate. Return TRUE on success and return the
2094 value in *IMMED in the format of IEEE754 single-precision encoding.
2095 *CCP points to the start of the string; DP_P is TRUE when the immediate
2096 is expected to be in double-precision (N.B. this only matters when
2097 hexadecimal representation is involved).
2098
2099 N.B. 0.0 is accepted by this function. */
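/* For illustration (example operands assumed from the syntax handled
   below, not taken from the original source): "#1.0" and "#0.0" are parsed
   via atof_ieee, while "#0x3f800000" (or "#0x3ff0000000000000" when DP_P
   is TRUE) supplies the IEEE754 encoding directly in hexadecimal.  */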
2100
2101 static bfd_boolean
2102 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2103 {
2104 char *str = *ccp;
2105 char *fpnum;
2106 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2107 int found_fpchar = 0;
2108 int64_t val = 0;
2109 unsigned fpword = 0;
2110 bfd_boolean hex_p = FALSE;
2111
2112 skip_past_char (&str, '#');
2113
2114 fpnum = str;
2115 skip_whitespace (fpnum);
2116
2117 if (strncmp (fpnum, "0x", 2) == 0)
2118 {
2119 /* Support the hexadecimal representation of the IEEE754 encoding.
2120 Double-precision is expected when DP_P is TRUE, otherwise the
2121 representation should be in single-precision. */
2122 if (! parse_constant_immediate (&str, &val))
2123 goto invalid_fp;
2124
2125 if (dp_p)
2126 {
2127 if (! aarch64_double_precision_fmovable (val, &fpword))
2128 goto invalid_fp;
2129 }
2130 else if ((uint64_t) val > 0xffffffff)
2131 goto invalid_fp;
2132 else
2133 fpword = val;
2134
2135 hex_p = TRUE;
2136 }
2137 else
2138 {
2139 /* We must not accidentally parse an integer as a floating-point number.
2140 Make sure that the value we parse is not an integer by checking for
2141 special characters '.' or 'e'. */
2142 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2143 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2144 {
2145 found_fpchar = 1;
2146 break;
2147 }
2148
2149 if (!found_fpchar)
2150 return FALSE;
2151 }
2152
2153 if (! hex_p)
2154 {
2155 int i;
2156
2157 if ((str = atof_ieee (str, 's', words)) == NULL)
2158 goto invalid_fp;
2159
2160 /* Our FP word must be 32 bits (single-precision FP). */
2161 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2162 {
2163 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2164 fpword |= words[i];
2165 }
2166 }
2167
2168 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2169 {
2170 *immed = fpword;
2171 *ccp = str;
2172 return TRUE;
2173 }
2174
2175 invalid_fp:
2176 set_fatal_syntax_error (_("invalid floating-point constant"));
2177 return FALSE;
2178 }
2179
2180 /* Less-generic immediate-value read function with the possibility of loading
2181 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2182 instructions.
2183
2184 To prevent the expression parser from pushing a register name into the
2185 symbol table as an undefined symbol, a check is first done to find
2186 out whether STR is a valid register name followed by a comma or the end
2187 of line. Return FALSE if STR is such a register. */
2188
2189 static bfd_boolean
2190 parse_big_immediate (char **str, int64_t *imm)
2191 {
2192 char *ptr = *str;
2193
2194 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2195 {
2196 set_syntax_error (_("immediate operand required"));
2197 return FALSE;
2198 }
2199
2200 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2201
2202 if (inst.reloc.exp.X_op == O_constant)
2203 *imm = inst.reloc.exp.X_add_number;
2204
2205 *str = ptr;
2206
2207 return TRUE;
2208 }
2209
2210 /* Record in *RELOC that the operand described by *OPERAND needs a GAS
2211 internal fixup.  If NEED_LIBOPCODES_P is non-zero, the fixup will need
2212 assistance from libopcodes. */
2213
2214 static inline void
2215 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2216 const aarch64_opnd_info *operand,
2217 int need_libopcodes_p)
2218 {
2219 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2220 reloc->opnd = operand->type;
2221 if (need_libopcodes_p)
2222 reloc->need_libopcodes_p = 1;
2223 }
2224
2225 /* Return TRUE if the instruction needs to be fixed up later internally by
2226 GAS; otherwise return FALSE. */
2227
2228 static inline bfd_boolean
2229 aarch64_gas_internal_fixup_p (void)
2230 {
2231 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2232 }
2233
2234 /* Assign the immediate value to the relevant field in *OPERAND if
2235 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2236 needs an internal fixup in a later stage.
2237 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2238 IMM.VALUE that may get assigned with the constant. */
2239 static inline void
2240 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2241 aarch64_opnd_info *operand,
2242 int addr_off_p,
2243 int need_libopcodes_p,
2244 int skip_p)
2245 {
2246 if (reloc->exp.X_op == O_constant)
2247 {
2248 if (addr_off_p)
2249 operand->addr.offset.imm = reloc->exp.X_add_number;
2250 else
2251 operand->imm.value = reloc->exp.X_add_number;
2252 reloc->type = BFD_RELOC_UNUSED;
2253 }
2254 else
2255 {
2256 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2257 /* Tell libopcodes to ignore this operand or not. This is helpful
2258 when one of the operands needs to be fixed up later but we need
2259 libopcodes to check the other operands. */
2260 operand->skip = skip_p;
2261 }
2262 }
2263
2264 /* Relocation modifiers. Each entry in the table contains the textual
2265 name for the relocation which may be placed before a symbol used as
2266 a load/store offset, or an add immediate. It must be surrounded by a
2267 leading and trailing colon, for example:
2268
2269 ldr x0, [x1, #:rello:varsym]
2270 add x0, x1, #:rello:varsym */
2271
2272 struct reloc_table_entry
2273 {
2274 const char *name;
2275 int pc_rel;
2276 bfd_reloc_code_real_type adrp_type;
2277 bfd_reloc_code_real_type movw_type;
2278 bfd_reloc_code_real_type add_type;
2279 bfd_reloc_code_real_type ldst_type;
2280 };
2281
2282 static struct reloc_table_entry reloc_table[] = {
2283 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2284 {"lo12", 0,
2285 0,
2286 0,
2287 BFD_RELOC_AARCH64_ADD_LO12,
2288 BFD_RELOC_AARCH64_LDST_LO12},
2289
2290 /* Higher 21 bits of pc-relative page offset: ADRP */
2291 {"pg_hi21", 1,
2292 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2293 0,
2294 0,
2295 0},
2296
2297 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2298 {"pg_hi21_nc", 1,
2299 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2300 0,
2301 0,
2302 0},
2303
2304 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2305 {"abs_g0", 0,
2306 0,
2307 BFD_RELOC_AARCH64_MOVW_G0,
2308 0,
2309 0},
2310
2311 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2312 {"abs_g0_s", 0,
2313 0,
2314 BFD_RELOC_AARCH64_MOVW_G0_S,
2315 0,
2316 0},
2317
2318 /* Less significant bits 0-15 of address/value: MOVK, no check */
2319 {"abs_g0_nc", 0,
2320 0,
2321 BFD_RELOC_AARCH64_MOVW_G0_NC,
2322 0,
2323 0},
2324
2325 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2326 {"abs_g1", 0,
2327 0,
2328 BFD_RELOC_AARCH64_MOVW_G1,
2329 0,
2330 0},
2331
2332 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2333 {"abs_g1_s", 0,
2334 0,
2335 BFD_RELOC_AARCH64_MOVW_G1_S,
2336 0,
2337 0},
2338
2339 /* Less significant bits 16-31 of address/value: MOVK, no check */
2340 {"abs_g1_nc", 0,
2341 0,
2342 BFD_RELOC_AARCH64_MOVW_G1_NC,
2343 0,
2344 0},
2345
2346 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2347 {"abs_g2", 0,
2348 0,
2349 BFD_RELOC_AARCH64_MOVW_G2,
2350 0,
2351 0},
2352
2353 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2354 {"abs_g2_s", 0,
2355 0,
2356 BFD_RELOC_AARCH64_MOVW_G2_S,
2357 0,
2358 0},
2359
2360 /* Less significant bits 32-47 of address/value: MOVK, no check */
2361 {"abs_g2_nc", 0,
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G2_NC,
2364 0,
2365 0},
2366
2367 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2368 {"abs_g3", 0,
2369 0,
2370 BFD_RELOC_AARCH64_MOVW_G3,
2371 0,
2372 0},
2373
2374 /* Get to the page containing GOT entry for a symbol. */
2375 {"got", 1,
2376 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2377 0,
2378 0,
2379 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2380
2381 /* 12 bit offset into the page containing GOT entry for that symbol. */
2382 {"got_lo12", 0,
2383 0,
2384 0,
2385 0,
2386 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2387
2388 /* Get to the page containing GOT TLS entry for a symbol */
2389 {"tlsgd", 0,
2390 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2391 0,
2392 0,
2393 0},
2394
2395 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2396 {"tlsgd_lo12", 0,
2397 0,
2398 0,
2399 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2400 0},
2401
2402 /* Get to the page containing GOT TLS entry for a symbol */
2403 {"tlsdesc", 0,
2404 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2405 0,
2406 0,
2407 0},
2408
2409 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2410 {"tlsdesc_lo12", 0,
2411 0,
2412 0,
2413 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2414 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2415
2416 /* Get to the page containing GOT TLS entry for a symbol */
2417 {"gottprel", 0,
2418 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2419 0,
2420 0,
2421 0},
2422
2423 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2424 {"gottprel_lo12", 0,
2425 0,
2426 0,
2427 0,
2428 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2429
2430 /* Get tp offset for a symbol. */
2431 {"tprel", 0,
2432 0,
2433 0,
2434 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2435 0},
2436
2437 /* Get tp offset for a symbol. */
2438 {"tprel_lo12", 0,
2439 0,
2440 0,
2441 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2442 0},
2443
2444 /* Get tp offset for a symbol. */
2445 {"tprel_hi12", 0,
2446 0,
2447 0,
2448 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2449 0},
2450
2451 /* Get tp offset for a symbol. */
2452 {"tprel_lo12_nc", 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2456 0},
2457
2458 /* Most significant bits 32-47 of address/value: MOVZ. */
2459 {"tprel_g2", 0,
2460 0,
2461 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2462 0,
2463 0},
2464
2465 /* Most significant bits 16-31 of address/value: MOVZ. */
2466 {"tprel_g1", 0,
2467 0,
2468 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2469 0,
2470 0},
2471
2472 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2473 {"tprel_g1_nc", 0,
2474 0,
2475 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2476 0,
2477 0},
2478
2479 /* Most significant bits 0-15 of address/value: MOVZ. */
2480 {"tprel_g0", 0,
2481 0,
2482 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2483 0,
2484 0},
2485
2486 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2487 {"tprel_g0_nc", 0,
2488 0,
2489 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2490 0,
2491 0},
2492 };
2493
2494 /* Given the address of a pointer pointing to the textual name of a
2495 relocation as may appear in assembler source, attempt to find its
2496 details in reloc_table. The pointer will be updated to the character
2497 after the trailing colon. On failure, NULL will be returned;
2498 otherwise return the reloc_table_entry. */
2499
2500 static struct reloc_table_entry *
2501 find_reloc_table_entry (char **str)
2502 {
2503 unsigned int i;
2504 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2505 {
2506 int length = strlen (reloc_table[i].name);
2507
2508 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2509 && (*str)[length] == ':')
2510 {
2511 *str += (length + 1);
2512 return &reloc_table[i];
2513 }
2514 }
2515
2516 return NULL;
2517 }
2518
2519 /* Mode argument to parse_shift and parser_shifter_operand. */
2520 enum parse_shift_mode
2521 {
2522 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2523 "#imm{,lsl #n}" */
2524 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2525 "#imm" */
2526 SHIFTED_LSL, /* bare "lsl #n" */
2527 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2528 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2529 };
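/* For illustration (assembly fragments assumed, not from the original
   source): SHIFTED_ARITH_IMM accepts operands such as "w1, lsl #12" or
   "#1, lsl #12"; SHIFTED_REG_OFFSET accepts "uxtw #2" as in
   "[x0, w1, uxtw #2]"; SHIFTED_LSL_MSL accepts "msl #8" as used by the
   AdvSIMD modified-immediate MOVI/MVNI forms.  */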
2530
2531 /* Parse a <shift> operator on an AArch64 data processing instruction.
2532 Return TRUE on success; otherwise return FALSE. */
2533 static bfd_boolean
2534 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2535 {
2536 const struct aarch64_name_value_pair *shift_op;
2537 enum aarch64_modifier_kind kind;
2538 expressionS exp;
2539 int exp_has_prefix;
2540 char *s = *str;
2541 char *p = s;
2542
2543 for (p = *str; ISALPHA (*p); p++)
2544 ;
2545
2546 if (p == *str)
2547 {
2548 set_syntax_error (_("shift expression expected"));
2549 return FALSE;
2550 }
2551
2552 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2553
2554 if (shift_op == NULL)
2555 {
2556 set_syntax_error (_("shift operator expected"));
2557 return FALSE;
2558 }
2559
2560 kind = aarch64_get_operand_modifier (shift_op);
2561
2562 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2563 {
2564 set_syntax_error (_("invalid use of 'MSL'"));
2565 return FALSE;
2566 }
2567
2568 switch (mode)
2569 {
2570 case SHIFTED_LOGIC_IMM:
2571 if (aarch64_extend_operator_p (kind) == TRUE)
2572 {
2573 set_syntax_error (_("extending shift is not permitted"));
2574 return FALSE;
2575 }
2576 break;
2577
2578 case SHIFTED_ARITH_IMM:
2579 if (kind == AARCH64_MOD_ROR)
2580 {
2581 set_syntax_error (_("'ROR' shift is not permitted"));
2582 return FALSE;
2583 }
2584 break;
2585
2586 case SHIFTED_LSL:
2587 if (kind != AARCH64_MOD_LSL)
2588 {
2589 set_syntax_error (_("only 'LSL' shift is permitted"));
2590 return FALSE;
2591 }
2592 break;
2593
2594 case SHIFTED_REG_OFFSET:
2595 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2596 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2597 {
2598 set_fatal_syntax_error
2599 (_("invalid shift for the register offset addressing mode"));
2600 return FALSE;
2601 }
2602 break;
2603
2604 case SHIFTED_LSL_MSL:
2605 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2606 {
2607 set_syntax_error (_("invalid shift operator"));
2608 return FALSE;
2609 }
2610 break;
2611
2612 default:
2613 abort ();
2614 }
2615
2616 /* Whitespace can appear here if the next thing is a bare digit. */
2617 skip_whitespace (p);
2618
2619 /* Parse shift amount. */
2620 exp_has_prefix = 0;
2621 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2622 exp.X_op = O_absent;
2623 else
2624 {
2625 if (is_immediate_prefix (*p))
2626 {
2627 p++;
2628 exp_has_prefix = 1;
2629 }
2630 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2631 }
2632 if (exp.X_op == O_absent)
2633 {
2634 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2635 {
2636 set_syntax_error (_("missing shift amount"));
2637 return FALSE;
2638 }
2639 operand->shifter.amount = 0;
2640 }
2641 else if (exp.X_op != O_constant)
2642 {
2643 set_syntax_error (_("constant shift amount required"));
2644 return FALSE;
2645 }
2646 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2647 {
2648 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2649 return FALSE;
2650 }
2651 else
2652 {
2653 operand->shifter.amount = exp.X_add_number;
2654 operand->shifter.amount_present = 1;
2655 }
2656
2657 operand->shifter.operator_present = 1;
2658 operand->shifter.kind = kind;
2659
2660 *str = p;
2661 return TRUE;
2662 }
2663
2664 /* Parse a <shifter_operand> for a data processing instruction:
2665
2666 #<immediate>
2667 #<immediate>, LSL #imm
2668
2669 Validation of immediate operands is deferred to md_apply_fix.
2670
2671 Return TRUE on success; otherwise return FALSE. */
2672
2673 static bfd_boolean
2674 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2675 enum parse_shift_mode mode)
2676 {
2677 char *p;
2678
2679 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2680 return FALSE;
2681
2682 p = *str;
2683
2684 /* Accept an immediate expression. */
2685 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2686 return FALSE;
2687
2688 /* Accept optional LSL for arithmetic immediate values. */
2689 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2690 if (! parse_shift (&p, operand, SHIFTED_LSL))
2691 return FALSE;
2692
2693 /* Do not accept any shifter for logical immediate values. */
2694 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2695 && parse_shift (&p, operand, mode))
2696 {
2697 set_syntax_error (_("unexpected shift operator"));
2698 return FALSE;
2699 }
2700
2701 *str = p;
2702 return TRUE;
2703 }
2704
2705 /* Parse a <shifter_operand> for a data processing instruction:
2706
2707 <Rm>
2708 <Rm>, <shift>
2709 #<immediate>
2710 #<immediate>, LSL #imm
2711
2712 where <shift> is handled by parse_shift above, and the last two
2713 cases are handled by the function above.
2714
2715 Validation of immediate operands is deferred to md_apply_fix.
2716
2717 Return TRUE on success; otherwise return FALSE. */
2718
2719 static bfd_boolean
2720 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2721 enum parse_shift_mode mode)
2722 {
2723 int reg;
2724 int isreg32, isregzero;
2725 enum aarch64_operand_class opd_class
2726 = aarch64_get_operand_class (operand->type);
2727
2728 if ((reg =
2729 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2730 {
2731 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2732 {
2733 set_syntax_error (_("unexpected register in the immediate operand"));
2734 return FALSE;
2735 }
2736
2737 if (!isregzero && reg == REG_SP)
2738 {
2739 set_syntax_error (BAD_SP);
2740 return FALSE;
2741 }
2742
2743 operand->reg.regno = reg;
2744 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2745
2746 /* Accept optional shift operation on register. */
2747 if (! skip_past_comma (str))
2748 return TRUE;
2749
2750 if (! parse_shift (str, operand, mode))
2751 return FALSE;
2752
2753 return TRUE;
2754 }
2755 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2756 {
2757 set_syntax_error
2758 (_("integer register expected in the extended/shifted operand "
2759 "register"));
2760 return FALSE;
2761 }
2762
2763 /* We have a shifted immediate variable. */
2764 return parse_shifter_operand_imm (str, operand, mode);
2765 }
2766
2767 /* Return TRUE on success; return FALSE otherwise. */
2768
2769 static bfd_boolean
2770 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2771 enum parse_shift_mode mode)
2772 {
2773 char *p = *str;
2774
2775 /* Determine if we have the sequence of characters #: or just :
2776 coming next. If we do, then we check for a :rello: relocation
2777 modifier. If we don't, punt the whole lot to
2778 parse_shifter_operand. */
2779
2780 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2781 {
2782 struct reloc_table_entry *entry;
2783
2784 if (p[0] == '#')
2785 p += 2;
2786 else
2787 p++;
2788 *str = p;
2789
2790 /* Try to parse a relocation. Anything else is an error. */
2791 if (!(entry = find_reloc_table_entry (str)))
2792 {
2793 set_syntax_error (_("unknown relocation modifier"));
2794 return FALSE;
2795 }
2796
2797 if (entry->add_type == 0)
2798 {
2799 set_syntax_error
2800 (_("this relocation modifier is not allowed on this instruction"));
2801 return FALSE;
2802 }
2803
2804 /* Save str before we decompose it. */
2805 p = *str;
2806
2807 /* Next, we parse the expression. */
2808 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2809 return FALSE;
2810
2811 /* Record the relocation type (use the ADD variant here). */
2812 inst.reloc.type = entry->add_type;
2813 inst.reloc.pc_rel = entry->pc_rel;
2814
2815 /* If str is empty, we've reached the end, stop here. */
2816 if (**str == '\0')
2817 return TRUE;
2818
2819 /* Otherwise, we have a shifted reloc modifier, so rewind to
2820 recover the variable name and continue parsing for the shifter. */
2821 *str = p;
2822 return parse_shifter_operand_imm (str, operand, mode);
2823 }
2824
2825 return parse_shifter_operand (str, operand, mode);
2826 }
2827
2828 /* Parse all forms of an address expression. Information is written
2829 to *OPERAND and/or inst.reloc.
2830
2831 The A64 instruction set has the following addressing modes:
2832
2833 Offset
2834 [base] // in SIMD ld/st structure
2835 [base{,#0}] // in ld/st exclusive
2836 [base{,#imm}]
2837 [base,Xm{,LSL #imm}]
2838 [base,Xm,SXTX {#imm}]
2839 [base,Wm,(S|U)XTW {#imm}]
2840 Pre-indexed
2841 [base,#imm]!
2842 Post-indexed
2843 [base],#imm
2844 [base],Xm // in SIMD ld/st structure
2845 PC-relative (literal)
2846 label
2847 =immediate
2848
2849 (As a convenience, the notation "=immediate" is permitted in conjunction
2850 with the pc-relative literal load instructions to automatically place an
2851 immediate value or symbolic address in a nearby literal pool and generate
2852 a hidden label which references it.)
2853
2854 Upon successful parsing, the address structure in *OPERAND will be
2855 filled in the following way:
2856
2857 .base_regno = <base>
2858 .offset.is_reg // 1 if the offset is a register
2859 .offset.imm = <imm>
2860 .offset.regno = <Rm>
2861
2862 For different addressing modes defined in the A64 ISA:
2863
2864 Offset
2865 .pcrel=0; .preind=1; .postind=0; .writeback=0
2866 Pre-indexed
2867 .pcrel=0; .preind=1; .postind=0; .writeback=1
2868 Post-indexed
2869 .pcrel=0; .preind=0; .postind=1; .writeback=1
2870 PC-relative (literal)
2871 .pcrel=1; .preind=1; .postind=0; .writeback=0
2872
2873 The shift/extension information, if any, will be stored in .shifter.
2874
2875 It is the caller's responsibility to check for addressing modes not
2876 supported by the instruction, and to set inst.reloc.type. */
2877
2878 static bfd_boolean
2879 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2880 int accept_reg_post_index)
2881 {
2882 char *p = *str;
2883 int reg;
2884 int isreg32, isregzero;
2885 expressionS *exp = &inst.reloc.exp;
2886
2887 if (! skip_past_char (&p, '['))
2888 {
2889 /* =immediate or label. */
2890 operand->addr.pcrel = 1;
2891 operand->addr.preind = 1;
2892
2893 /* #:<reloc_op>:<symbol> */
2894 skip_past_char (&p, '#');
2895 if (reloc && skip_past_char (&p, ':'))
2896 {
2897 struct reloc_table_entry *entry;
2898
2899 /* Try to parse a relocation modifier. Anything else is
2900 an error. */
2901 entry = find_reloc_table_entry (&p);
2902 if (! entry)
2903 {
2904 set_syntax_error (_("unknown relocation modifier"));
2905 return FALSE;
2906 }
2907
2908 if (entry->ldst_type == 0)
2909 {
2910 set_syntax_error
2911 (_("this relocation modifier is not allowed on this "
2912 "instruction"));
2913 return FALSE;
2914 }
2915
2916 /* #:<reloc_op>: */
2917 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2918 {
2919 set_syntax_error (_("invalid relocation expression"));
2920 return FALSE;
2921 }
2922
2923 /* #:<reloc_op>:<expr> */
2924 /* Record the load/store relocation type. */
2925 inst.reloc.type = entry->ldst_type;
2926 inst.reloc.pc_rel = entry->pc_rel;
2927 }
2928 else
2929 {
2930
2931 if (skip_past_char (&p, '='))
2932 /* =immediate; need to generate the literal in the literal pool. */
2933 inst.gen_lit_pool = 1;
2934
2935 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2936 {
2937 set_syntax_error (_("invalid address"));
2938 return FALSE;
2939 }
2940 }
2941
2942 *str = p;
2943 return TRUE;
2944 }
2945
2946 /* [ */
2947
2948 /* Accept SP and reject ZR */
2949 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2950 if (reg == PARSE_FAIL || isreg32)
2951 {
2952 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2953 return FALSE;
2954 }
2955 operand->addr.base_regno = reg;
2956
2957 /* [Xn */
2958 if (skip_past_comma (&p))
2959 {
2960 /* [Xn, */
2961 operand->addr.preind = 1;
2962
2963 /* Reject SP and accept ZR */
2964 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2965 if (reg != PARSE_FAIL)
2966 {
2967 /* [Xn,Rm */
2968 operand->addr.offset.regno = reg;
2969 operand->addr.offset.is_reg = 1;
2970 /* Shifted index. */
2971 if (skip_past_comma (&p))
2972 {
2973 /* [Xn,Rm, */
2974 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2975 /* Use the diagnostics set in parse_shift, so do not set a new
2976 error message here. */
2977 return FALSE;
2978 }
2979 /* We only accept:
2980 [base,Xm{,LSL #imm}]
2981 [base,Xm,SXTX {#imm}]
2982 [base,Wm,(S|U)XTW {#imm}] */
2983 if (operand->shifter.kind == AARCH64_MOD_NONE
2984 || operand->shifter.kind == AARCH64_MOD_LSL
2985 || operand->shifter.kind == AARCH64_MOD_SXTX)
2986 {
2987 if (isreg32)
2988 {
2989 set_syntax_error (_("invalid use of 32-bit register offset"));
2990 return FALSE;
2991 }
2992 }
2993 else if (!isreg32)
2994 {
2995 set_syntax_error (_("invalid use of 64-bit register offset"));
2996 return FALSE;
2997 }
2998 }
2999 else
3000 {
3001 /* [Xn,#:<reloc_op>:<symbol> */
3002 skip_past_char (&p, '#');
3003 if (reloc && skip_past_char (&p, ':'))
3004 {
3005 struct reloc_table_entry *entry;
3006
3007 /* Try to parse a relocation modifier. Anything else is
3008 an error. */
3009 if (!(entry = find_reloc_table_entry (&p)))
3010 {
3011 set_syntax_error (_("unknown relocation modifier"));
3012 return FALSE;
3013 }
3014
3015 if (entry->ldst_type == 0)
3016 {
3017 set_syntax_error
3018 (_("this relocation modifier is not allowed on this "
3019 "instruction"));
3020 return FALSE;
3021 }
3022
3023 /* [Xn,#:<reloc_op>: */
3024 /* We now have the group relocation table entry corresponding to
3025 the name in the assembler source. Next, we parse the
3026 expression. */
3027 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3028 {
3029 set_syntax_error (_("invalid relocation expression"));
3030 return FALSE;
3031 }
3032
3033 /* [Xn,#:<reloc_op>:<expr> */
3034 /* Record the load/store relocation type. */
3035 inst.reloc.type = entry->ldst_type;
3036 inst.reloc.pc_rel = entry->pc_rel;
3037 }
3038 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3039 {
3040 set_syntax_error (_("invalid expression in the address"));
3041 return FALSE;
3042 }
3043 /* [Xn,<expr> */
3044 }
3045 }
3046
3047 if (! skip_past_char (&p, ']'))
3048 {
3049 set_syntax_error (_("']' expected"));
3050 return FALSE;
3051 }
3052
3053 if (skip_past_char (&p, '!'))
3054 {
3055 if (operand->addr.preind && operand->addr.offset.is_reg)
3056 {
3057 set_syntax_error (_("register offset not allowed in pre-indexed "
3058 "addressing mode"));
3059 return FALSE;
3060 }
3061 /* [Xn]! */
3062 operand->addr.writeback = 1;
3063 }
3064 else if (skip_past_comma (&p))
3065 {
3066 /* [Xn], */
3067 operand->addr.postind = 1;
3068 operand->addr.writeback = 1;
3069
3070 if (operand->addr.preind)
3071 {
3072 set_syntax_error (_("cannot combine pre- and post-indexing"));
3073 return FALSE;
3074 }
3075
3076 if (accept_reg_post_index
3077 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3078 &isregzero)) != PARSE_FAIL)
3079 {
3080 /* [Xn],Xm */
3081 if (isreg32)
3082 {
3083 set_syntax_error (_("invalid 32-bit register offset"));
3084 return FALSE;
3085 }
3086 operand->addr.offset.regno = reg;
3087 operand->addr.offset.is_reg = 1;
3088 }
3089 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3090 {
3091 /* [Xn],#expr */
3092 set_syntax_error (_("invalid expression in the address"));
3093 return FALSE;
3094 }
3095 }
3096
3097 /* If at this point neither .preind nor .postind is set, we have a
3098 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3099 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3100 {
3101 if (operand->addr.writeback)
3102 {
3103 /* Reject [Rn]! */
3104 set_syntax_error (_("missing offset in the pre-indexed address"));
3105 return FALSE;
3106 }
3107 operand->addr.preind = 1;
3108 inst.reloc.exp.X_op = O_constant;
3109 inst.reloc.exp.X_add_number = 0;
3110 }
3111
3112 *str = p;
3113 return TRUE;
3114 }
3115
3116 /* Return TRUE on success; otherwise return FALSE. */
3117 static bfd_boolean
3118 parse_address (char **str, aarch64_opnd_info *operand,
3119 int accept_reg_post_index)
3120 {
3121 return parse_address_main (str, operand, 0, accept_reg_post_index);
3122 }
3123
3124 /* Return TRUE on success; otherwise return FALSE. */
3125 static bfd_boolean
3126 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3127 {
3128 return parse_address_main (str, operand, 1, 0);
3129 }
3130
3131 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3132 Return TRUE on success; otherwise return FALSE. */
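/* For illustration (example operands not in the original source): in
   "movz x0, #:abs_g1:sym" the modifier selects the movw_type of the
   "abs_g1" entry in reloc_table (BFD_RELOC_AARCH64_MOVW_G1), whereas a
   plain operand such as "#1234" sets *INTERNAL_FIXUP_P instead.  */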
3133 static bfd_boolean
3134 parse_half (char **str, int *internal_fixup_p)
3135 {
3136 char *p, *saved;
3137 int dummy;
3138
3139 p = *str;
3140 skip_past_char (&p, '#');
3141
3142 gas_assert (internal_fixup_p);
3143 *internal_fixup_p = 0;
3144
3145 if (*p == ':')
3146 {
3147 struct reloc_table_entry *entry;
3148
3149 /* Try to parse a relocation. Anything else is an error. */
3150 ++p;
3151 if (!(entry = find_reloc_table_entry (&p)))
3152 {
3153 set_syntax_error (_("unknown relocation modifier"));
3154 return FALSE;
3155 }
3156
3157 if (entry->movw_type == 0)
3158 {
3159 set_syntax_error
3160 (_("this relocation modifier is not allowed on this instruction"));
3161 return FALSE;
3162 }
3163
3164 inst.reloc.type = entry->movw_type;
3165 }
3166 else
3167 *internal_fixup_p = 1;
3168
3169 /* Avoid parsing a register as a general symbol. */
3170 saved = p;
3171 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3172 return FALSE;
3173 p = saved;
3174
3175 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3176 return FALSE;
3177
3178 *str = p;
3179 return TRUE;
3180 }
3181
3182 /* Parse an operand for an ADRP instruction:
3183 ADRP <Xd>, <label>
3184 Return TRUE on success; otherwise return FALSE. */
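/* For illustration (examples not in the original source): "adrp x0, sym"
   falls back to the default BFD_RELOC_AARCH64_ADR_HI21_PCREL, while
   "adrp x0, :got:sym" selects the adrp_type of the "got" entry in
   reloc_table, i.e. BFD_RELOC_AARCH64_ADR_GOT_PAGE.  */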
3185
3186 static bfd_boolean
3187 parse_adrp (char **str)
3188 {
3189 char *p;
3190
3191 p = *str;
3192 if (*p == ':')
3193 {
3194 struct reloc_table_entry *entry;
3195
3196 /* Try to parse a relocation. Anything else is an error. */
3197 ++p;
3198 if (!(entry = find_reloc_table_entry (&p)))
3199 {
3200 set_syntax_error (_("unknown relocation modifier"));
3201 return FALSE;
3202 }
3203
3204 if (entry->adrp_type == 0)
3205 {
3206 set_syntax_error
3207 (_("this relocation modifier is not allowed on this instruction"));
3208 return FALSE;
3209 }
3210
3211 inst.reloc.type = entry->adrp_type;
3212 }
3213 else
3214 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3215
3216 inst.reloc.pc_rel = 1;
3217
3218 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3219 return FALSE;
3220
3221 *str = p;
3222 return TRUE;
3223 }
3224
3225 /* Miscellaneous. */
3226
3227 /* Parse an option for a preload instruction. Returns the encoding for the
3228 option, or PARSE_FAIL. */
3229
3230 static int
3231 parse_pldop (char **str)
3232 {
3233 char *p, *q;
3234 const struct aarch64_name_value_pair *o;
3235
3236 p = q = *str;
3237 while (ISALNUM (*q))
3238 q++;
3239
3240 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3241 if (!o)
3242 return PARSE_FAIL;
3243
3244 *str = q;
3245 return o->value;
3246 }
3247
3248 /* Parse an option for a barrier instruction. Returns the encoding for the
3249 option, or PARSE_FAIL. */
3250
3251 static int
3252 parse_barrier (char **str)
3253 {
3254 char *p, *q;
3255 const asm_barrier_opt *o;
3256
3257 p = q = *str;
3258 while (ISALPHA (*q))
3259 q++;
3260
3261 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3262 if (!o)
3263 return PARSE_FAIL;
3264
3265 *str = q;
3266 return o->value;
3267 }
3268
3269 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3270 Returns the encoding for the option, or PARSE_FAIL.
3271
3272 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3273 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3274
3275 static int
3276 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3277 {
3278 char *p, *q;
3279 char buf[32];
3280 const aarch64_sys_reg *o;
3281 int value;
3282
3283 p = buf;
3284 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3285 if (p < buf + 31)
3286 *p++ = TOLOWER (*q);
3287 *p = '\0';
3288 /* Assert that BUF is large enough. */
3289 gas_assert (p - buf == q - *str);
3290
3291 o = hash_find (sys_regs, buf);
3292 if (!o)
3293 {
3294 if (!imple_defined_p)
3295 return PARSE_FAIL;
3296 else
3297 {
3298 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3299 registers. */
3300 unsigned int op0, op1, cn, cm, op2;
3301 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3302 return PARSE_FAIL;
3303 /* The architecture specifies the encoding space for implementation
3304 defined registers as:
3305 op0 op1 CRn CRm op2
3306 1x xxx 1x11 xxxx xxx
3307 For convenience GAS accepts a wider encoding space, as follows:
3308 op0 op1 CRn CRm op2
3309 1x xxx xxxx xxxx xxx */
3310 if ((op0 != 2 && op0 != 3) || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3311 return PARSE_FAIL;
3312 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
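/* For example (an arbitrary name chosen for illustration, not from the
   original source), "s3_0_c15_c2_0" gives op0=3, op1=0, CRn=15, CRm=2,
   op2=0, so VALUE = (3 << 14) | (15 << 7) | (2 << 3) = 0xc790.  */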
3313 }
3314 }
3315 else
3316 {
3317 /* Only check system register names for deprecation. If we have
3318 been given a PSTATE field name (imple_defined_p == 0) then allow
3319 any value. */
3320 if (imple_defined_p && aarch64_sys_reg_deprecated_p (o))
3321 as_warn (_("system register name '%s' is deprecated and may be "
3322 "removed in a future release"), buf);
3323 value = o->value;
3324 }
3325
3326 *str = q;
3327 return value;
3328 }
3329
3330 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3331 for the option, or NULL. */
3332
3333 static const aarch64_sys_ins_reg *
3334 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3335 {
3336 char *p, *q;
3337 char buf[32];
3338 const aarch64_sys_ins_reg *o;
3339
3340 p = buf;
3341 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3342 if (p < buf + 31)
3343 *p++ = TOLOWER (*q);
3344 *p = '\0';
3345
3346 o = hash_find (sys_ins_regs, buf);
3347 if (!o)
3348 return NULL;
3349
3350 *str = q;
3351 return o;
3352 }
3353 \f
3354 #define po_char_or_fail(chr) do { \
3355 if (! skip_past_char (&str, chr)) \
3356 goto failure; \
3357 } while (0)
3358
3359 #define po_reg_or_fail(regtype) do { \
3360 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3361 if (val == PARSE_FAIL) \
3362 { \
3363 set_default_error (); \
3364 goto failure; \
3365 } \
3366 } while (0)
3367
3368 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3369 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3370 &isreg32, &isregzero); \
3371 if (val == PARSE_FAIL) \
3372 { \
3373 set_default_error (); \
3374 goto failure; \
3375 } \
3376 info->reg.regno = val; \
3377 if (isreg32) \
3378 info->qualifier = AARCH64_OPND_QLF_W; \
3379 else \
3380 info->qualifier = AARCH64_OPND_QLF_X; \
3381 } while (0)
3382
3383 #define po_imm_nc_or_fail() do { \
3384 if (! parse_constant_immediate (&str, &val)) \
3385 goto failure; \
3386 } while (0)
3387
3388 #define po_imm_or_fail(min, max) do { \
3389 if (! parse_constant_immediate (&str, &val)) \
3390 goto failure; \
3391 if (val < min || val > max) \
3392 { \
3393 set_fatal_syntax_error (_("immediate value out of range "\
3394 #min " to "#max)); \
3395 goto failure; \
3396 } \
3397 } while (0)
3398
3399 #define po_misc_or_fail(expr) do { \
3400 if (!expr) \
3401 goto failure; \
3402 } while (0)
3403 \f
3404 /* encode the 12-bit imm field of Add/sub immediate */
3405 static inline uint32_t
3406 encode_addsub_imm (uint32_t imm)
3407 {
3408 return imm << 10;
3409 }
3410
3411 /* encode the shift amount field of Add/sub immediate */
3412 static inline uint32_t
3413 encode_addsub_imm_shift_amount (uint32_t cnt)
3414 {
3415 return cnt << 22;
3416 }
3417
3418
3419 /* encode the imm field of Adr instruction */
3420 static inline uint32_t
3421 encode_adr_imm (uint32_t imm)
3422 {
3423 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3424 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3425 }
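/* A worked example (illustrative only): an ADR immediate of 5 has
   immlo = 5 & 0x3 = 1 and immhi = 5 >> 2 = 1, so the function returns
   (1 << 29) | (1 << 5) = 0x20000020.  */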
3426
3427 /* encode the immediate field of Move wide immediate */
3428 static inline uint32_t
3429 encode_movw_imm (uint32_t imm)
3430 {
3431 return imm << 5;
3432 }
3433
3434 /* encode the 26-bit offset of unconditional branch */
3435 static inline uint32_t
3436 encode_branch_ofs_26 (uint32_t ofs)
3437 {
3438 return ofs & ((1 << 26) - 1);
3439 }
3440
3441 /* encode the 19-bit offset of conditional branch and compare & branch */
3442 static inline uint32_t
3443 encode_cond_branch_ofs_19 (uint32_t ofs)
3444 {
3445 return (ofs & ((1 << 19) - 1)) << 5;
3446 }
3447
3448 /* encode the 19-bit offset of ld literal */
3449 static inline uint32_t
3450 encode_ld_lit_ofs_19 (uint32_t ofs)
3451 {
3452 return (ofs & ((1 << 19) - 1)) << 5;
3453 }
3454
3455 /* Encode the 14-bit offset of test & branch. */
3456 static inline uint32_t
3457 encode_tst_branch_ofs_14 (uint32_t ofs)
3458 {
3459 return (ofs & ((1 << 14) - 1)) << 5;
3460 }
3461
3462 /* Encode the 16-bit imm field of svc/hvc/smc. */
3463 static inline uint32_t
3464 encode_svc_imm (uint32_t imm)
3465 {
3466 return imm << 5;
3467 }
3468
3469 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3470 static inline uint32_t
3471 reencode_addsub_switch_add_sub (uint32_t opcode)
3472 {
3473 return opcode ^ (1 << 30);
3474 }
3475
3476 static inline uint32_t
3477 reencode_movzn_to_movz (uint32_t opcode)
3478 {
3479 return opcode | (1 << 30);
3480 }
3481
3482 static inline uint32_t
3483 reencode_movzn_to_movn (uint32_t opcode)
3484 {
3485 return opcode & ~(1 << 30);
3486 }
3487
3488 /* Overall per-instruction processing. */
3489
3490 /* We need to be able to fix up arbitrary expressions in some statements.
3491 This is so that we can handle symbols that are an arbitrary distance from
3492 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3493 which returns part of an address in a form which will be valid for
3494 a data instruction. We do this by pushing the expression into a symbol
3495 in the expr_section, and creating a fix for that. */
3496
3497 static fixS *
3498 fix_new_aarch64 (fragS * frag,
3499 int where,
3500 short int size, expressionS * exp, int pc_rel, int reloc)
3501 {
3502 fixS *new_fix;
3503
3504 switch (exp->X_op)
3505 {
3506 case O_constant:
3507 case O_symbol:
3508 case O_add:
3509 case O_subtract:
3510 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3511 break;
3512
3513 default:
3514 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3515 pc_rel, reloc);
3516 break;
3517 }
3518 return new_fix;
3519 }
3520 \f
3521 /* Diagnostics on operands errors. */
3522
3523 /* By default, output one-line error message only.
3524 Enable the verbose error message by -merror-verbose. */
3525 static int verbose_error_p = 0;
3526
3527 #ifdef DEBUG_AARCH64
3528 /* N.B. this is only for the purpose of debugging. */
3529 const char* operand_mismatch_kind_names[] =
3530 {
3531 "AARCH64_OPDE_NIL",
3532 "AARCH64_OPDE_RECOVERABLE",
3533 "AARCH64_OPDE_SYNTAX_ERROR",
3534 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3535 "AARCH64_OPDE_INVALID_VARIANT",
3536 "AARCH64_OPDE_OUT_OF_RANGE",
3537 "AARCH64_OPDE_UNALIGNED",
3538 "AARCH64_OPDE_REG_LIST",
3539 "AARCH64_OPDE_OTHER_ERROR",
3540 };
3541 #endif /* DEBUG_AARCH64 */
3542
3543 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3544
3545 When multiple errors of different kinds are found in the same assembly
3546 line, only the error of the highest severity will be picked up for
3547 issuing the diagnostics. */
3548
3549 static inline bfd_boolean
3550 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3551 enum aarch64_operand_error_kind rhs)
3552 {
3553 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3554 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3555 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3556 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3557 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3558 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3559 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3560 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3561 return lhs > rhs;
3562 }
3563
3564 /* Helper routine to get the mnemonic name from the assembly instruction
3565 line; should only be called for diagnostic purposes, as there is a
3566 string copy operation involved, which may affect the runtime
3567 performance if used elsewhere. */
3568
3569 static const char*
3570 get_mnemonic_name (const char *str)
3571 {
3572 static char mnemonic[32];
3573 char *ptr;
3574
3575 /* Get the first 31 bytes and assume that the full name is included. */
3576 strncpy (mnemonic, str, 31);
3577 mnemonic[31] = '\0';
3578
3579 /* Scan up to the end of the mnemonic, which must end in white space,
3580 '.', or end of string. */
3581 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3582 ;
3583
3584 *ptr = '\0';
3585
3586 /* Append '...' to the truncated long name. */
3587 if (ptr - mnemonic == 31)
3588 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3589
3590 return mnemonic;
3591 }
3592
3593 static void
3594 reset_aarch64_instruction (aarch64_instruction *instruction)
3595 {
3596 memset (instruction, '\0', sizeof (aarch64_instruction));
3597 instruction->reloc.type = BFD_RELOC_UNUSED;
3598 }
3599
3600 /* Data structures storing one user error in the assembly code related to
3601 operands. */
3602
3603 struct operand_error_record
3604 {
3605 const aarch64_opcode *opcode;
3606 aarch64_operand_error detail;
3607 struct operand_error_record *next;
3608 };
3609
3610 typedef struct operand_error_record operand_error_record;
3611
3612 struct operand_errors
3613 {
3614 operand_error_record *head;
3615 operand_error_record *tail;
3616 };
3617
3618 typedef struct operand_errors operand_errors;
3619
3620 /* Top-level data structure reporting user errors for the current line of
3621 the assembly code.
3622 The way md_assemble works is that all opcodes sharing the same mnemonic
3623 name are iterated to find a match to the assembly line. In this data
3624 structure, each such opcode will have one operand_error_record
3625 allocated and inserted. In other words, excessive errors related to
3626 a single opcode are disregarded. */
3627 operand_errors operand_error_report;
3628
3629 /* Free record nodes. */
3630 static operand_error_record *free_opnd_error_record_nodes = NULL;
3631
3632 /* Initialize the data structure that stores the operand mismatch
3633 information on assembling one line of the assembly code. */
3634 static void
3635 init_operand_error_report (void)
3636 {
3637 if (operand_error_report.head != NULL)
3638 {
3639 gas_assert (operand_error_report.tail != NULL);
3640 operand_error_report.tail->next = free_opnd_error_record_nodes;
3641 free_opnd_error_record_nodes = operand_error_report.head;
3642 operand_error_report.head = NULL;
3643 operand_error_report.tail = NULL;
3644 return;
3645 }
3646 gas_assert (operand_error_report.tail == NULL);
3647 }
3648
3649 /* Return TRUE if some operand error has been recorded during the
3650 parsing of the current assembly line using the opcode *OPCODE;
3651 otherwise return FALSE. */
3652 static inline bfd_boolean
3653 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3654 {
3655 operand_error_record *record = operand_error_report.head;
3656 return record && record->opcode == opcode;
3657 }
3658
3659 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3660 OPCODE field is initialized with OPCODE.
3661 N.B. only one record for each opcode, i.e. at most one error is
3662 recorded for each instruction template. */
3663
3664 static void
3665 add_operand_error_record (const operand_error_record* new_record)
3666 {
3667 const aarch64_opcode *opcode = new_record->opcode;
3668 operand_error_record* record = operand_error_report.head;
3669
3670 /* The record may have been created for this opcode. If not, we need
3671 to prepare one. */
3672 if (! opcode_has_operand_error_p (opcode))
3673 {
3674 /* Get one empty record. */
3675 if (free_opnd_error_record_nodes == NULL)
3676 {
3677 record = xmalloc (sizeof (operand_error_record));
3678 if (record == NULL)
3679 abort ();
3680 }
3681 else
3682 {
3683 record = free_opnd_error_record_nodes;
3684 free_opnd_error_record_nodes = record->next;
3685 }
3686 record->opcode = opcode;
3687 /* Insert at the head. */
3688 record->next = operand_error_report.head;
3689 operand_error_report.head = record;
3690 if (operand_error_report.tail == NULL)
3691 operand_error_report.tail = record;
3692 }
3693 else if (record->detail.kind != AARCH64_OPDE_NIL
3694 && record->detail.index <= new_record->detail.index
3695 && operand_error_higher_severity_p (record->detail.kind,
3696 new_record->detail.kind))
3697 {
3698 /* In the case of multiple errors found on operands related to a
3699 single opcode, only record the error of the leftmost operand and
3700 only if the error is of higher severity. */
3701 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3702 " the existing error %s on operand %d",
3703 operand_mismatch_kind_names[new_record->detail.kind],
3704 new_record->detail.index,
3705 operand_mismatch_kind_names[record->detail.kind],
3706 record->detail.index);
3707 return;
3708 }
3709
3710 record->detail = new_record->detail;
3711 }
3712
3713 static inline void
3714 record_operand_error_info (const aarch64_opcode *opcode,
3715 aarch64_operand_error *error_info)
3716 {
3717 operand_error_record record;
3718 record.opcode = opcode;
3719 record.detail = *error_info;
3720 add_operand_error_record (&record);
3721 }
3722
3723 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3724 error message *ERROR, for operand IDX (count from 0). */
3725
3726 static void
3727 record_operand_error (const aarch64_opcode *opcode, int idx,
3728 enum aarch64_operand_error_kind kind,
3729 const char* error)
3730 {
3731 aarch64_operand_error info;
3732 memset(&info, 0, sizeof (info));
3733 info.index = idx;
3734 info.kind = kind;
3735 info.error = error;
3736 record_operand_error_info (opcode, &info);
3737 }
3738
3739 static void
3740 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3741 enum aarch64_operand_error_kind kind,
3742 const char* error, const int *extra_data)
3743 {
3744 aarch64_operand_error info;
3745 info.index = idx;
3746 info.kind = kind;
3747 info.error = error;
3748 info.data[0] = extra_data[0];
3749 info.data[1] = extra_data[1];
3750 info.data[2] = extra_data[2];
3751 record_operand_error_info (opcode, &info);
3752 }
3753
3754 static void
3755 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3756 const char* error, int lower_bound,
3757 int upper_bound)
3758 {
3759 int data[3] = {lower_bound, upper_bound, 0};
3760 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3761 error, data);
3762 }
3763
3764 /* Remove the operand error record for *OPCODE. */
3765 static void ATTRIBUTE_UNUSED
3766 remove_operand_error_record (const aarch64_opcode *opcode)
3767 {
3768 if (opcode_has_operand_error_p (opcode))
3769 {
3770 operand_error_record* record = operand_error_report.head;
3771 gas_assert (record != NULL && operand_error_report.tail != NULL);
3772 operand_error_report.head = record->next;
3773 record->next = free_opnd_error_record_nodes;
3774 free_opnd_error_record_nodes = record;
3775 if (operand_error_report.head == NULL)
3776 {
3777 gas_assert (operand_error_report.tail == record);
3778 operand_error_report.tail = NULL;
3779 }
3780 }
3781 }
3782
3783 /* Given the instruction in *INSTR, return the index of the best matched
3784 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3785
3786 Return -1 if there is no qualifier sequence; return the first match
3787 if multiple matches are found. */
3788
3789 static int
3790 find_best_match (const aarch64_inst *instr,
3791 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3792 {
3793 int i, num_opnds, max_num_matched, idx;
3794
3795 num_opnds = aarch64_num_of_operands (instr->opcode);
3796 if (num_opnds == 0)
3797 {
3798 DEBUG_TRACE ("no operand");
3799 return -1;
3800 }
3801
3802 max_num_matched = 0;
3803 idx = -1;
3804
3805 /* For each pattern. */
3806 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3807 {
3808 int j, num_matched;
3809 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3810
3811 /* Most opcodes have far fewer patterns in the list. */
3812 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3813 {
3814 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3815 if (i != 0 && idx == -1)
3816 /* If nothing has been matched, return the 1st sequence. */
3817 idx = 0;
3818 break;
3819 }
3820
3821 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3822 if (*qualifiers == instr->operands[j].qualifier)
3823 ++num_matched;
3824
3825 if (num_matched > max_num_matched)
3826 {
3827 max_num_matched = num_matched;
3828 idx = i;
3829 }
3830 }
3831
3832 DEBUG_TRACE ("return with %d", idx);
3833 return idx;
3834 }
3835
3836 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3837 corresponding operands in *INSTR. */
3838
3839 static inline void
3840 assign_qualifier_sequence (aarch64_inst *instr,
3841 const aarch64_opnd_qualifier_t *qualifiers)
3842 {
3843 int i = 0;
3844 int num_opnds = aarch64_num_of_operands (instr->opcode);
3845 gas_assert (num_opnds);
3846 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3847 instr->operands[i].qualifier = *qualifiers;
3848 }
3849
3850 /* Print operands for the diagnosis purpose. */
3851
3852 static void
3853 print_operands (char *buf, const aarch64_opcode *opcode,
3854 const aarch64_opnd_info *opnds)
3855 {
3856 int i;
3857
3858 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3859 {
3860 const size_t size = 128;
3861 char str[size];
3862
3863 /* We mainly rely on the opcode's operand info; however, we also
3864 look into inst->operands to support printing of the optional
3865 operand.
3866 The two operand codes should be the same in all cases, except
3867 when the operand is optional. */
3868 if (opcode->operands[i] == AARCH64_OPND_NIL
3869 || opnds[i].type == AARCH64_OPND_NIL)
3870 break;
3871
3872 /* Generate the operand string in STR. */
3873 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3874
3875 /* Delimiter. */
3876 if (str[0] != '\0')
3877 strcat (buf, i == 0 ? " " : ",");
3878
3879 /* Append the operand string. */
3880 strcat (buf, str);
3881 }
3882 }
3883
3884 /* Send to stderr a string as information. */
3885
3886 static void
3887 output_info (const char *format, ...)
3888 {
3889 char *file;
3890 unsigned int line;
3891 va_list args;
3892
3893 as_where (&file, &line);
3894 if (file)
3895 {
3896 if (line != 0)
3897 fprintf (stderr, "%s:%u: ", file, line);
3898 else
3899 fprintf (stderr, "%s: ", file);
3900 }
3901 fprintf (stderr, _("Info: "));
3902 va_start (args, format);
3903 vfprintf (stderr, format, args);
3904 va_end (args);
3905 (void) putc ('\n', stderr);
3906 }
3907
3908 /* Output one operand error record. */
3909
3910 static void
3911 output_operand_error_record (const operand_error_record *record, char *str)
3912 {
3913 int idx = record->detail.index;
3914 const aarch64_opcode *opcode = record->opcode;
3915 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3916 : AARCH64_OPND_NIL);
3917 const aarch64_operand_error *detail = &record->detail;
3918
3919 switch (detail->kind)
3920 {
3921 case AARCH64_OPDE_NIL:
3922 gas_assert (0);
3923 break;
3924
3925 case AARCH64_OPDE_SYNTAX_ERROR:
3926 case AARCH64_OPDE_RECOVERABLE:
3927 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3928 case AARCH64_OPDE_OTHER_ERROR:
3929 gas_assert (idx >= 0);
3930 /* Use the prepared error message if there is one, otherwise use the
3931 operand description string to describe the error. */
3932 if (detail->error != NULL)
3933 {
3934 if (detail->index == -1)
3935 as_bad (_("%s -- `%s'"), detail->error, str);
3936 else
3937 as_bad (_("%s at operand %d -- `%s'"),
3938 detail->error, detail->index + 1, str);
3939 }
3940 else
3941 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3942 aarch64_get_operand_desc (opd_code), str);
3943 break;
3944
3945 case AARCH64_OPDE_INVALID_VARIANT:
3946 as_bad (_("operand mismatch -- `%s'"), str);
3947 if (verbose_error_p)
3948 {
3949 /* We will try to correct the erroneous instruction and also provide
3950 more information e.g. all other valid variants.
3951
3952 The string representation of the corrected instruction and other
3953 valid variants are generated by
3954
3955 1) obtaining the intermediate representation of the erroneous
3956 instruction;
3957 2) manipulating the IR, e.g. replacing the operand qualifier;
3958 3) printing out the instruction by calling the printer functions
3959 shared with the disassembler.
3960
3961 The limitation of this method is that the exact input assembly
3962 line cannot be accurately reproduced in some cases, for example an
3963 optional operand present in the actual assembly line will be
3964 omitted in the output; likewise for the optional syntax rules,
3965 e.g. the # before the immediate. Another limitation is that the
3966 assembly symbols and relocation operations in the assembly line
3967 currently cannot be printed out in the error report. Last but not
3968 least, when other errors co-exist with this error, the
3969 'corrected' instruction may still be incorrect, e.g. given
3970 'ldnp h0,h1,[x0,#6]!'
3971 this diagnosis will provide the version:
3972 'ldnp s0,s1,[x0,#6]!'
3973 which is still not right. */
3974 size_t len = strlen (get_mnemonic_name (str));
3975 int i, qlf_idx;
3976 bfd_boolean result;
3977 const size_t size = 2048;
3978 char buf[size];
3979 aarch64_inst *inst_base = &inst.base;
3980 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3981
3982 /* Init inst. */
3983 reset_aarch64_instruction (&inst);
3984 inst_base->opcode = opcode;
3985
3986 /* Reset the error report so that there is no side effect on the
3987 following operand parsing. */
3988 init_operand_error_report ();
3989
3990 /* Fill inst. */
3991 result = parse_operands (str + len, opcode)
3992 && programmer_friendly_fixup (&inst);
3993 gas_assert (result);
3994 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3995 NULL, NULL);
3996 gas_assert (!result);
3997
3998 /* Find the most matched qualifier sequence. */
3999 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4000 gas_assert (qlf_idx > -1);
4001
4002 /* Assign the qualifiers. */
4003 assign_qualifier_sequence (inst_base,
4004 opcode->qualifiers_list[qlf_idx]);
4005
4006 /* Print the hint. */
4007 output_info (_(" did you mean this?"));
4008 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4009 print_operands (buf, opcode, inst_base->operands);
4010 output_info (_(" %s"), buf);
4011
4012 /* Print out other variant(s) if there is any. */
4013 if (qlf_idx != 0 ||
4014 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4015 output_info (_(" other valid variant(s):"));
4016
4017 /* For each pattern. */
4018 qualifiers_list = opcode->qualifiers_list;
4019 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4020 {
4021 /* Most opcodes have far fewer patterns in the list.
4022 First NIL qualifier indicates the end in the list. */
4023 if (empty_qualifier_sequence_p (*qualifiers_list))
4024 break;
4025
4026 if (i != qlf_idx)
4027 {
4028 /* Mnemonics name. */
4029 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4030
4031 /* Assign the qualifiers. */
4032 assign_qualifier_sequence (inst_base, *qualifiers_list);
4033
4034 /* Print instruction. */
4035 print_operands (buf, opcode, inst_base->operands);
4036
4037 output_info (_(" %s"), buf);
4038 }
4039 }
4040 }
4041 break;
4042
4043 case AARCH64_OPDE_OUT_OF_RANGE:
4044 if (detail->data[0] != detail->data[1])
4045 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4046 detail->error ? detail->error : _("immediate value"),
4047 detail->data[0], detail->data[1], detail->index + 1, str);
4048 else
4049 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4050 detail->error ? detail->error : _("immediate value"),
4051 detail->data[0], detail->index + 1, str);
4052 break;
4053
4054 case AARCH64_OPDE_REG_LIST:
4055 if (detail->data[0] == 1)
4056 as_bad (_("invalid number of registers in the list; "
4057 "only 1 register is expected at operand %d -- `%s'"),
4058 detail->index + 1, str);
4059 else
4060 as_bad (_("invalid number of registers in the list; "
4061 "%d registers are expected at operand %d -- `%s'"),
4062 detail->data[0], detail->index + 1, str);
4063 break;
4064
4065 case AARCH64_OPDE_UNALIGNED:
4066 as_bad (_("immediate value should be a multiple of "
4067 "%d at operand %d -- `%s'"),
4068 detail->data[0], detail->index + 1, str);
4069 break;
4070
4071 default:
4072 gas_assert (0);
4073 break;
4074 }
4075 }
4076
4077 /* Process and output the error message about the operand mismatch.
4078
4079 When this function is called, the operand error information has
4080 been collected for an assembly line; in the case of multiple
4081 instruction templates there will be multiple errors. Output the
4082 error message that most closely describes the problem. */
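/* For example, a mnemonic such as ADD has several templates (immediate,
   shifted-register and extended-register forms); a line that matches none
   of them records one error per template, and the loops below pick the
   record whose error kind has the highest severity and, within that kind,
   the highest mismatching operand index.  */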
4083
4084 static void
4085 output_operand_error_report (char *str)
4086 {
4087 int largest_error_pos;
4088 const char *msg = NULL;
4089 enum aarch64_operand_error_kind kind;
4090 operand_error_record *curr;
4091 operand_error_record *head = operand_error_report.head;
4092 operand_error_record *record = NULL;
4093
4094 /* No error to report. */
4095 if (head == NULL)
4096 return;
4097
4098 gas_assert (head != NULL && operand_error_report.tail != NULL);
4099
4100 /* Only one error. */
4101 if (head == operand_error_report.tail)
4102 {
4103 DEBUG_TRACE ("single opcode entry with error kind: %s",
4104 operand_mismatch_kind_names[head->detail.kind]);
4105 output_operand_error_record (head, str);
4106 return;
4107 }
4108
4109 /* Find the error kind of the highest severity. */
4110 DEBUG_TRACE ("multiple opcode entries with error kind");
4111 kind = AARCH64_OPDE_NIL;
4112 for (curr = head; curr != NULL; curr = curr->next)
4113 {
4114 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4115 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4116 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4117 kind = curr->detail.kind;
4118 }
4119 gas_assert (kind != AARCH64_OPDE_NIL);
4120
4121 /* Pick up one of errors of KIND to report. */
4122 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4123 for (curr = head; curr != NULL; curr = curr->next)
4124 {
4125 if (curr->detail.kind != kind)
4126 continue;
4127 /* If there are multiple errors, pick up the one with the highest
4128 mismatching operand index. In the case of multiple errors with
4129 the equally highest operand index, pick up the first one or the
4130 first one with non-NULL error message. */
4131 if (curr->detail.index > largest_error_pos
4132 || (curr->detail.index == largest_error_pos && msg == NULL
4133 && curr->detail.error != NULL))
4134 {
4135 largest_error_pos = curr->detail.index;
4136 record = curr;
4137 msg = record->detail.error;
4138 }
4139 }
4140
4141 gas_assert (largest_error_pos != -2 && record != NULL);
4142 DEBUG_TRACE ("Pick up error kind %s to report",
4143 operand_mismatch_kind_names[record->detail.kind]);
4144
4145 /* Output. */
4146 output_operand_error_record (record, str);
4147 }
4148 \f
4149 /* Write an AARCH64 instruction to buf - always little-endian. */
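/* For example, the NOP encoding 0xd503201f is written out as the byte
   sequence 1f 20 03 d5 (cf. aarch64_noop[] further down).  */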
4150 static void
4151 put_aarch64_insn (char *buf, uint32_t insn)
4152 {
4153 unsigned char *where = (unsigned char *) buf;
4154 where[0] = insn;
4155 where[1] = insn >> 8;
4156 where[2] = insn >> 16;
4157 where[3] = insn >> 24;
4158 }
4159
4160 static uint32_t
4161 get_aarch64_insn (char *buf)
4162 {
4163 unsigned char *where = (unsigned char *) buf;
4164 uint32_t result;
4165 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4166 return result;
4167 }
4168
4169 static void
4170 output_inst (struct aarch64_inst *new_inst)
4171 {
4172 char *to = NULL;
4173
4174 to = frag_more (INSN_SIZE);
4175
4176 frag_now->tc_frag_data.recorded = 1;
4177
4178 put_aarch64_insn (to, inst.base.value);
4179
4180 if (inst.reloc.type != BFD_RELOC_UNUSED)
4181 {
4182 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4183 INSN_SIZE, &inst.reloc.exp,
4184 inst.reloc.pc_rel,
4185 inst.reloc.type);
4186 DEBUG_TRACE ("Prepared relocation fix up");
4187 /* Don't check the addend value against the instruction size,
4188 that's the job of our code in md_apply_fix(). */
4189 fixp->fx_no_overflow = 1;
4190 if (new_inst != NULL)
4191 fixp->tc_fix_data.inst = new_inst;
4192 if (aarch64_gas_internal_fixup_p ())
4193 {
4194 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4195 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4196 fixp->fx_addnumber = inst.reloc.flags;
4197 }
4198 }
4199
4200 dwarf2_emit_insn (INSN_SIZE);
4201 }
4202
4203 /* Link together opcodes of the same name. */
4204
4205 struct templates
4206 {
4207 aarch64_opcode *opcode;
4208 struct templates *next;
4209 };
4210
4211 typedef struct templates templates;
4212
4213 static templates *
4214 lookup_mnemonic (const char *start, int len)
4215 {
4216 templates *templ = NULL;
4217
4218 templ = hash_find_n (aarch64_ops_hsh, start, len);
4219 return templ;
4220 }
4221
4222 /* Subroutine of md_assemble, responsible for looking up the primary
4223 opcode from the mnemonic the user wrote. STR points to the
4224 beginning of the mnemonic. */
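/* For example, for "b.eq" the two characters after the dot are looked up
   in aarch64_cond_hsh, the EQ condition is recorded in inst.cond, and the
   mnemonic lookup is retried under the ".c"-suffixed name "b.c" -- the name
   under which the opcode table is assumed to register truly conditional
   instructions (cf. the F_COND handling in md_assemble).  */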
4225
4226 static templates *
4227 opcode_lookup (char **str)
4228 {
4229 char *end, *base;
4230 const aarch64_cond *cond;
4231 char condname[16];
4232 int len;
4233
4234 /* Scan up to the end of the mnemonic, which must end in white space,
4235 '.', or end of string. */
4236 for (base = end = *str; is_part_of_name(*end); end++)
4237 if (*end == '.')
4238 break;
4239
4240 if (end == base)
4241 return 0;
4242
4243 inst.cond = COND_ALWAYS;
4244
4245 /* Handle a possible condition. */
4246 if (end[0] == '.')
4247 {
4248 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4249 if (cond)
4250 {
4251 inst.cond = cond->value;
4252 *str = end + 3;
4253 }
4254 else
4255 {
4256 *str = end;
4257 return 0;
4258 }
4259 }
4260 else
4261 *str = end;
4262
4263 len = end - base;
4264
4265 if (inst.cond == COND_ALWAYS)
4266 {
4267 /* Look for unaffixed mnemonic. */
4268 return lookup_mnemonic (base, len);
4269 }
4270 else if (len <= 13)
4271 {
4272 /* Append ".c" to the mnemonic if conditional. */
4273 memcpy (condname, base, len);
4274 memcpy (condname + len, ".c", 2);
4275 base = condname;
4276 len += 2;
4277 return lookup_mnemonic (base, len);
4278 }
4279
4280 return NULL;
4281 }
4282
4283 /* Internal helper routine converting a vector neon_type_el structure
4284 *VECTYPE to a corresponding operand qualifier. */
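/* A worked example (assuming the qualifier enumerators are laid out in the
   order 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q, which the offset arithmetic
   below relies on): a ".4s" arrangement has a 4-byte element and width 4,
   i.e. a 16-byte register, so offset = (NT_s << 1) + 1 = 5 and the result
   is AARCH64_OPND_QLF_V_4S.  */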
4285
4286 static inline aarch64_opnd_qualifier_t
4287 vectype_to_qualifier (const struct neon_type_el *vectype)
4288 {
4289 /* Element size in bytes indexed by neon_el_type. */
4290 const unsigned char ele_size[5]
4291 = {1, 2, 4, 8, 16};
4292
4293 if (!vectype->defined || vectype->type == NT_invtype)
4294 goto vectype_conversion_fail;
4295
4296 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4297
4298 if (vectype->defined & NTA_HASINDEX)
4299 /* Vector element register. */
4300 return AARCH64_OPND_QLF_S_B + vectype->type;
4301 else
4302 {
4303 /* Vector register. */
4304 int reg_size = ele_size[vectype->type] * vectype->width;
4305 unsigned offset;
4306 if (reg_size != 16 && reg_size != 8)
4307 goto vectype_conversion_fail;
4308 /* The conversion is calculated based on the relation of the order of
4309 qualifiers to the vector element size and vector register size. */
4310 offset = (vectype->type == NT_q)
4311 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4312 gas_assert (offset <= 8);
4313 return AARCH64_OPND_QLF_V_8B + offset;
4314 }
4315
4316 vectype_conversion_fail:
4317 first_error (_("bad vector arrangement type"));
4318 return AARCH64_OPND_QLF_NIL;
4319 }
4320
4321 /* Process an optional operand that is found omitted from the assembly line.
4322 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4323 instruction's opcode entry while IDX is the index of this omitted operand.
4324 */
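/* For example, "ret" written without its register operand is expected to
   be completed here with X30, assuming the opcode table marks that operand
   as optional with a default value of 30.  */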
4325
4326 static void
4327 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4328 int idx, aarch64_opnd_info *operand)
4329 {
4330 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4331 gas_assert (optional_operand_p (opcode, idx));
4332 gas_assert (!operand->present);
4333
4334 switch (type)
4335 {
4336 case AARCH64_OPND_Rd:
4337 case AARCH64_OPND_Rn:
4338 case AARCH64_OPND_Rm:
4339 case AARCH64_OPND_Rt:
4340 case AARCH64_OPND_Rt2:
4341 case AARCH64_OPND_Rs:
4342 case AARCH64_OPND_Ra:
4343 case AARCH64_OPND_Rt_SYS:
4344 case AARCH64_OPND_Rd_SP:
4345 case AARCH64_OPND_Rn_SP:
4346 case AARCH64_OPND_Fd:
4347 case AARCH64_OPND_Fn:
4348 case AARCH64_OPND_Fm:
4349 case AARCH64_OPND_Fa:
4350 case AARCH64_OPND_Ft:
4351 case AARCH64_OPND_Ft2:
4352 case AARCH64_OPND_Sd:
4353 case AARCH64_OPND_Sn:
4354 case AARCH64_OPND_Sm:
4355 case AARCH64_OPND_Vd:
4356 case AARCH64_OPND_Vn:
4357 case AARCH64_OPND_Vm:
4358 case AARCH64_OPND_VdD1:
4359 case AARCH64_OPND_VnD1:
4360 operand->reg.regno = default_value;
4361 break;
4362
4363 case AARCH64_OPND_Ed:
4364 case AARCH64_OPND_En:
4365 case AARCH64_OPND_Em:
4366 operand->reglane.regno = default_value;
4367 break;
4368
4369 case AARCH64_OPND_IDX:
4370 case AARCH64_OPND_BIT_NUM:
4371 case AARCH64_OPND_IMMR:
4372 case AARCH64_OPND_IMMS:
4373 case AARCH64_OPND_SHLL_IMM:
4374 case AARCH64_OPND_IMM_VLSL:
4375 case AARCH64_OPND_IMM_VLSR:
4376 case AARCH64_OPND_CCMP_IMM:
4377 case AARCH64_OPND_FBITS:
4378 case AARCH64_OPND_UIMM4:
4379 case AARCH64_OPND_UIMM3_OP1:
4380 case AARCH64_OPND_UIMM3_OP2:
4381 case AARCH64_OPND_IMM:
4382 case AARCH64_OPND_WIDTH:
4383 case AARCH64_OPND_UIMM7:
4384 case AARCH64_OPND_NZCV:
4385 operand->imm.value = default_value;
4386 break;
4387
4388 case AARCH64_OPND_EXCEPTION:
4389 inst.reloc.type = BFD_RELOC_UNUSED;
4390 break;
4391
4392 case AARCH64_OPND_BARRIER_ISB:
4393 operand->barrier = aarch64_barrier_options + default_value;
4394 break;
4395 default:
4396 break;
4397 }
4398 }
4399
4400 /* Process the relocation type for move wide instructions.
4401 Return TRUE on success; otherwise return FALSE. */
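/* For example, for "movz x0, #:abs_g1:sym" (assuming the usual :abs_g1:
   modifier spelling in the reloc table) the parser selects
   BFD_RELOC_AARCH64_MOVW_G1 and the code below sets the implicit LSL amount
   to 16, while MOVK rejects the signed (_S) and TPREL flavours listed in
   the first switch.  */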
4402
4403 static bfd_boolean
4404 process_movw_reloc_info (void)
4405 {
4406 int is32;
4407 unsigned shift;
4408
4409 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4410
4411 if (inst.base.opcode->op == OP_MOVK)
4412 switch (inst.reloc.type)
4413 {
4414 case BFD_RELOC_AARCH64_MOVW_G0_S:
4415 case BFD_RELOC_AARCH64_MOVW_G1_S:
4416 case BFD_RELOC_AARCH64_MOVW_G2_S:
4417 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4418 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4419 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4420 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4421 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4422 set_syntax_error
4423 (_("the specified relocation type is not allowed for MOVK"));
4424 return FALSE;
4425 default:
4426 break;
4427 }
4428
4429 switch (inst.reloc.type)
4430 {
4431 case BFD_RELOC_AARCH64_MOVW_G0:
4432 case BFD_RELOC_AARCH64_MOVW_G0_S:
4433 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4434 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4435 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4436 shift = 0;
4437 break;
4438 case BFD_RELOC_AARCH64_MOVW_G1:
4439 case BFD_RELOC_AARCH64_MOVW_G1_S:
4440 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4441 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4442 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4443 shift = 16;
4444 break;
4445 case BFD_RELOC_AARCH64_MOVW_G2:
4446 case BFD_RELOC_AARCH64_MOVW_G2_S:
4447 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4448 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4449 if (is32)
4450 {
4451 set_fatal_syntax_error
4452 (_("the specified relocation type is not allowed for 32-bit "
4453 "register"));
4454 return FALSE;
4455 }
4456 shift = 32;
4457 break;
4458 case BFD_RELOC_AARCH64_MOVW_G3:
4459 if (is32)
4460 {
4461 set_fatal_syntax_error
4462 (_("the specified relocation type is not allowed for 32-bit "
4463 "register"));
4464 return FALSE;
4465 }
4466 shift = 48;
4467 break;
4468 default:
4469 /* More cases should be added when more MOVW-related relocation types
4470 are supported in GAS. */
4471 gas_assert (aarch64_gas_internal_fixup_p ());
4472 /* The shift amount should have already been set by the parser. */
4473 return TRUE;
4474 }
4475 inst.base.operands[1].shifter.amount = shift;
4476 return TRUE;
4477 }
4478
4479 /* A primitive log calculator. */
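/* E.g. get_logsz (4) is 2 and get_logsz (16) is 4; sizes greater than 16
   or not a power of two trip the assertions below.  */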
4480
4481 static inline unsigned int
4482 get_logsz (unsigned int size)
4483 {
4484 const unsigned char ls[16] =
4485 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4486 if (size > 16)
4487 {
4488 gas_assert (0);
4489 return -1;
4490 }
4491 gas_assert (ls[size - 1] != (unsigned char)-1);
4492 return ls[size - 1];
4493 }
4494
4495 /* Determine and return the real reloc type code for an instruction
4496 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
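/* For instance, for "ldr x0, [x1, #:lo12:sym]" (assuming the usual :lo12:
   modifier spelling) operand 0 has the X qualifier, i.e. an 8-byte element
   size, so logsz is 3 and the reloc becomes BFD_RELOC_AARCH64_LDST64_LO12.  */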
4497
4498 static inline bfd_reloc_code_real_type
4499 ldst_lo12_determine_real_reloc_type (void)
4500 {
4501 int logsz;
4502 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4503 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4504
4505 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4506 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4507 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4508 BFD_RELOC_AARCH64_LDST128_LO12
4509 };
4510
4511 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4512 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4513
4514 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4515 opd1_qlf =
4516 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4517 1, opd0_qlf, 0);
4518 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4519
4520 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4521 gas_assert (logsz >= 0 && logsz <= 4);
4522
4523 return reloc_ldst_lo12[logsz];
4524 }
4525
4526 /* Check whether a register list REGINFO is valid. The registers must be
4527 numbered in increasing order (modulo 32), in increments of one or two.
4528
4529 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4530 increments of two.
4531
4532 Return FALSE if such a register list is invalid, otherwise return TRUE. */
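/* In the encoding consumed here, bits [1:0] of REGINFO hold the number of
   registers minus one and each successive 5-bit field, starting at bit 2,
   holds a register number, lowest first.  For example REGINFO == 0x81
   describes the two-register list v0-v1: 1 + (0x81 & 3) == 2 registers,
   first regno 0, second regno 1.  */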
4533
4534 static bfd_boolean
4535 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4536 {
4537 uint32_t i, nb_regs, prev_regno, incr;
4538
4539 nb_regs = 1 + (reginfo & 0x3);
4540 reginfo >>= 2;
4541 prev_regno = reginfo & 0x1f;
4542 incr = accept_alternate ? 2 : 1;
4543
4544 for (i = 1; i < nb_regs; ++i)
4545 {
4546 uint32_t curr_regno;
4547 reginfo >>= 5;
4548 curr_regno = reginfo & 0x1f;
4549 if (curr_regno != ((prev_regno + incr) & 0x1f))
4550 return FALSE;
4551 prev_regno = curr_regno;
4552 }
4553
4554 return TRUE;
4555 }
4556
4557 /* Generic instruction operand parser. This does no encoding and no
4558 semantic validation; it merely squirrels values away in the inst
4559 structure. Returns TRUE or FALSE depending on whether the
4560 specified grammar matched. */
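/* When an optional operand is reached, the current position is remembered
   in backtrack_pos; if parsing subsequently fails, the operand is filled in
   with its default by process_omitted_operand and parsing resumes from the
   saved position, which is how e.g. a trailing optional operand may simply
   be left out of the source line.  */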
4561
4562 static bfd_boolean
4563 parse_operands (char *str, const aarch64_opcode *opcode)
4564 {
4565 int i;
4566 char *backtrack_pos = 0;
4567 const enum aarch64_opnd *operands = opcode->operands;
4568
4569 clear_error ();
4570 skip_whitespace (str);
4571
4572 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4573 {
4574 int64_t val;
4575 int isreg32, isregzero;
4576 int comma_skipped_p = 0;
4577 aarch64_reg_type rtype;
4578 struct neon_type_el vectype;
4579 aarch64_opnd_info *info = &inst.base.operands[i];
4580
4581 DEBUG_TRACE ("parse operand %d", i);
4582
4583 /* Assign the operand code. */
4584 info->type = operands[i];
4585
4586 if (optional_operand_p (opcode, i))
4587 {
4588 /* Remember where we are in case we need to backtrack. */
4589 gas_assert (!backtrack_pos);
4590 backtrack_pos = str;
4591 }
4592
4593 /* Expect a comma between operands; the backtrack mechanism will take
4594 care of cases of an omitted optional operand. */
4595 if (i > 0 && ! skip_past_char (&str, ','))
4596 {
4597 set_syntax_error (_("comma expected between operands"));
4598 goto failure;
4599 }
4600 else
4601 comma_skipped_p = 1;
4602
4603 switch (operands[i])
4604 {
4605 case AARCH64_OPND_Rd:
4606 case AARCH64_OPND_Rn:
4607 case AARCH64_OPND_Rm:
4608 case AARCH64_OPND_Rt:
4609 case AARCH64_OPND_Rt2:
4610 case AARCH64_OPND_Rs:
4611 case AARCH64_OPND_Ra:
4612 case AARCH64_OPND_Rt_SYS:
4613 po_int_reg_or_fail (1, 0);
4614 break;
4615
4616 case AARCH64_OPND_Rd_SP:
4617 case AARCH64_OPND_Rn_SP:
4618 po_int_reg_or_fail (0, 1);
4619 break;
4620
4621 case AARCH64_OPND_Rm_EXT:
4622 case AARCH64_OPND_Rm_SFT:
4623 po_misc_or_fail (parse_shifter_operand
4624 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4625 ? SHIFTED_ARITH_IMM
4626 : SHIFTED_LOGIC_IMM)));
4627 if (!info->shifter.operator_present)
4628 {
4629 /* Default to LSL if not present. Libopcodes prefers shifter
4630 kind to be explicit. */
4631 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4632 info->shifter.kind = AARCH64_MOD_LSL;
4633 /* For Rm_EXT, libopcodes will carry out further check on whether
4634 or not stack pointer is used in the instruction (Recall that
4635 "the extend operator is not optional unless at least one of
4636 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4637 }
4638 break;
4639
4640 case AARCH64_OPND_Fd:
4641 case AARCH64_OPND_Fn:
4642 case AARCH64_OPND_Fm:
4643 case AARCH64_OPND_Fa:
4644 case AARCH64_OPND_Ft:
4645 case AARCH64_OPND_Ft2:
4646 case AARCH64_OPND_Sd:
4647 case AARCH64_OPND_Sn:
4648 case AARCH64_OPND_Sm:
4649 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4650 if (val == PARSE_FAIL)
4651 {
4652 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4653 goto failure;
4654 }
4655 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4656
4657 info->reg.regno = val;
4658 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4659 break;
4660
4661 case AARCH64_OPND_Vd:
4662 case AARCH64_OPND_Vn:
4663 case AARCH64_OPND_Vm:
4664 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4665 if (val == PARSE_FAIL)
4666 {
4667 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4668 goto failure;
4669 }
4670 if (vectype.defined & NTA_HASINDEX)
4671 goto failure;
4672
4673 info->reg.regno = val;
4674 info->qualifier = vectype_to_qualifier (&vectype);
4675 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4676 goto failure;
4677 break;
4678
4679 case AARCH64_OPND_VdD1:
4680 case AARCH64_OPND_VnD1:
4681 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4682 if (val == PARSE_FAIL)
4683 {
4684 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4685 goto failure;
4686 }
4687 if (vectype.type != NT_d || vectype.index != 1)
4688 {
4689 set_fatal_syntax_error
4690 (_("the top half of a 128-bit FP/SIMD register is expected"));
4691 goto failure;
4692 }
4693 info->reg.regno = val;
4694 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4695 here; it is correct for the purpose of encoding/decoding since
4696 only the register number is explicitly encoded in the related
4697 instructions, although this appears a bit hacky. */
4698 info->qualifier = AARCH64_OPND_QLF_S_D;
4699 break;
4700
4701 case AARCH64_OPND_Ed:
4702 case AARCH64_OPND_En:
4703 case AARCH64_OPND_Em:
4704 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4705 if (val == PARSE_FAIL)
4706 {
4707 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4708 goto failure;
4709 }
4710 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4711 goto failure;
4712
4713 info->reglane.regno = val;
4714 info->reglane.index = vectype.index;
4715 info->qualifier = vectype_to_qualifier (&vectype);
4716 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4717 goto failure;
4718 break;
4719
4720 case AARCH64_OPND_LVn:
4721 case AARCH64_OPND_LVt:
4722 case AARCH64_OPND_LVt_AL:
4723 case AARCH64_OPND_LEt:
4724 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4725 goto failure;
4726 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4727 {
4728 set_fatal_syntax_error (_("invalid register list"));
4729 goto failure;
4730 }
4731 info->reglist.first_regno = (val >> 2) & 0x1f;
4732 info->reglist.num_regs = (val & 0x3) + 1;
4733 if (operands[i] == AARCH64_OPND_LEt)
4734 {
4735 if (!(vectype.defined & NTA_HASINDEX))
4736 goto failure;
4737 info->reglist.has_index = 1;
4738 info->reglist.index = vectype.index;
4739 }
4740 else if (!(vectype.defined & NTA_HASTYPE))
4741 goto failure;
4742 info->qualifier = vectype_to_qualifier (&vectype);
4743 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4744 goto failure;
4745 break;
4746
4747 case AARCH64_OPND_Cn:
4748 case AARCH64_OPND_Cm:
4749 po_reg_or_fail (REG_TYPE_CN);
4750 if (val > 15)
4751 {
4752 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4753 goto failure;
4754 }
4755 inst.base.operands[i].reg.regno = val;
4756 break;
4757
4758 case AARCH64_OPND_SHLL_IMM:
4759 case AARCH64_OPND_IMM_VLSR:
4760 po_imm_or_fail (1, 64);
4761 info->imm.value = val;
4762 break;
4763
4764 case AARCH64_OPND_CCMP_IMM:
4765 case AARCH64_OPND_FBITS:
4766 case AARCH64_OPND_UIMM4:
4767 case AARCH64_OPND_UIMM3_OP1:
4768 case AARCH64_OPND_UIMM3_OP2:
4769 case AARCH64_OPND_IMM_VLSL:
4770 case AARCH64_OPND_IMM:
4771 case AARCH64_OPND_WIDTH:
4772 po_imm_nc_or_fail ();
4773 info->imm.value = val;
4774 break;
4775
4776 case AARCH64_OPND_UIMM7:
4777 po_imm_or_fail (0, 127);
4778 info->imm.value = val;
4779 break;
4780
4781 case AARCH64_OPND_IDX:
4782 case AARCH64_OPND_BIT_NUM:
4783 case AARCH64_OPND_IMMR:
4784 case AARCH64_OPND_IMMS:
4785 po_imm_or_fail (0, 63);
4786 info->imm.value = val;
4787 break;
4788
4789 case AARCH64_OPND_IMM0:
4790 po_imm_nc_or_fail ();
4791 if (val != 0)
4792 {
4793 set_fatal_syntax_error (_("immediate zero expected"));
4794 goto failure;
4795 }
4796 info->imm.value = 0;
4797 break;
4798
4799 case AARCH64_OPND_FPIMM0:
4800 {
4801 int qfloat;
4802 bfd_boolean res1 = FALSE, res2 = FALSE;
4803 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4804 it is probably not worth the effort to support it. */
4805 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4806 && !(res2 = parse_constant_immediate (&str, &val)))
4807 goto failure;
4808 if ((res1 && qfloat == 0) || (res2 && val == 0))
4809 {
4810 info->imm.value = 0;
4811 info->imm.is_fp = 1;
4812 break;
4813 }
4814 set_fatal_syntax_error (_("immediate zero expected"));
4815 goto failure;
4816 }
4817
4818 case AARCH64_OPND_IMM_MOV:
4819 {
4820 char *saved = str;
4821 if (reg_name_p (str, REG_TYPE_R_Z_SP)
4822 || reg_name_p (str, REG_TYPE_VN))
4823 goto failure;
4824 str = saved;
4825 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4826 GE_OPT_PREFIX, 1));
4827 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4828 later. fix_mov_imm_insn will try to determine a machine
4829 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4830 message if the immediate cannot be moved by a single
4831 instruction. */
4832 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4833 inst.base.operands[i].skip = 1;
4834 }
4835 break;
4836
4837 case AARCH64_OPND_SIMD_IMM:
4838 case AARCH64_OPND_SIMD_IMM_SFT:
4839 if (! parse_big_immediate (&str, &val))
4840 goto failure;
4841 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4842 /* addr_off_p */ 0,
4843 /* need_libopcodes_p */ 1,
4844 /* skip_p */ 1);
4845 /* Parse shift.
4846 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4847 shift, we don't check it here; we leave the checking to
4848 libopcodes (operand_general_constraint_met_p). By
4849 doing this, we achieve better diagnostics. */
4850 if (skip_past_comma (&str)
4851 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4852 goto failure;
4853 if (!info->shifter.operator_present
4854 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4855 {
4856 /* Default to LSL if not present. Libopcodes prefers shifter
4857 kind to be explicit. */
4858 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4859 info->shifter.kind = AARCH64_MOD_LSL;
4860 }
4861 break;
4862
4863 case AARCH64_OPND_FPIMM:
4864 case AARCH64_OPND_SIMD_FPIMM:
4865 {
4866 int qfloat;
4867 bfd_boolean dp_p
4868 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4869 == 8);
4870 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4871 goto failure;
4872 if (qfloat == 0)
4873 {
4874 set_fatal_syntax_error (_("invalid floating-point constant"));
4875 goto failure;
4876 }
4877 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4878 inst.base.operands[i].imm.is_fp = 1;
4879 }
4880 break;
4881
4882 case AARCH64_OPND_LIMM:
4883 po_misc_or_fail (parse_shifter_operand (&str, info,
4884 SHIFTED_LOGIC_IMM));
4885 if (info->shifter.operator_present)
4886 {
4887 set_fatal_syntax_error
4888 (_("shift not allowed for bitmask immediate"));
4889 goto failure;
4890 }
4891 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4892 /* addr_off_p */ 0,
4893 /* need_libopcodes_p */ 1,
4894 /* skip_p */ 1);
4895 break;
4896
4897 case AARCH64_OPND_AIMM:
4898 if (opcode->op == OP_ADD)
4899 /* ADD may have relocation types. */
4900 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4901 SHIFTED_ARITH_IMM));
4902 else
4903 po_misc_or_fail (parse_shifter_operand (&str, info,
4904 SHIFTED_ARITH_IMM));
4905 switch (inst.reloc.type)
4906 {
4907 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4908 info->shifter.amount = 12;
4909 break;
4910 case BFD_RELOC_UNUSED:
4911 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4912 if (info->shifter.kind != AARCH64_MOD_NONE)
4913 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4914 inst.reloc.pc_rel = 0;
4915 break;
4916 default:
4917 break;
4918 }
4919 info->imm.value = 0;
4920 if (!info->shifter.operator_present)
4921 {
4922 /* Default to LSL if not present. Libopcodes prefers shifter
4923 kind to be explicit. */
4924 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4925 info->shifter.kind = AARCH64_MOD_LSL;
4926 }
4927 break;
4928
4929 case AARCH64_OPND_HALF:
4930 {
4931 /* #<imm16> or relocation. */
4932 int internal_fixup_p;
4933 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4934 if (internal_fixup_p)
4935 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4936 skip_whitespace (str);
4937 if (skip_past_comma (&str))
4938 {
4939 /* {, LSL #<shift>} */
4940 if (! aarch64_gas_internal_fixup_p ())
4941 {
4942 set_fatal_syntax_error (_("can't mix relocation modifier "
4943 "with explicit shift"));
4944 goto failure;
4945 }
4946 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4947 }
4948 else
4949 inst.base.operands[i].shifter.amount = 0;
4950 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4951 inst.base.operands[i].imm.value = 0;
4952 if (! process_movw_reloc_info ())
4953 goto failure;
4954 }
4955 break;
4956
4957 case AARCH64_OPND_EXCEPTION:
4958 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4959 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4960 /* addr_off_p */ 0,
4961 /* need_libopcodes_p */ 0,
4962 /* skip_p */ 1);
4963 break;
4964
4965 case AARCH64_OPND_NZCV:
4966 {
4967 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4968 if (nzcv != NULL)
4969 {
4970 str += 4;
4971 info->imm.value = nzcv->value;
4972 break;
4973 }
4974 po_imm_or_fail (0, 15);
4975 info->imm.value = val;
4976 }
4977 break;
4978
4979 case AARCH64_OPND_COND:
4980 case AARCH64_OPND_COND1:
4981 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4982 str += 2;
4983 if (info->cond == NULL)
4984 {
4985 set_syntax_error (_("invalid condition"));
4986 goto failure;
4987 }
4988 else if (operands[i] == AARCH64_OPND_COND1
4989 && (info->cond->value & 0xe) == 0xe)
4990 {
4991 /* Do not allow AL or NV. */
4992 set_default_error ();
4993 goto failure;
4994 }
4995 break;
4996
4997 case AARCH64_OPND_ADDR_ADRP:
4998 po_misc_or_fail (parse_adrp (&str));
4999 /* Clear the value as operand needs to be relocated. */
5000 info->imm.value = 0;
5001 break;
5002
5003 case AARCH64_OPND_ADDR_PCREL14:
5004 case AARCH64_OPND_ADDR_PCREL19:
5005 case AARCH64_OPND_ADDR_PCREL21:
5006 case AARCH64_OPND_ADDR_PCREL26:
5007 po_misc_or_fail (parse_address_reloc (&str, info));
5008 if (!info->addr.pcrel)
5009 {
5010 set_syntax_error (_("invalid pc-relative address"));
5011 goto failure;
5012 }
5013 if (inst.gen_lit_pool
5014 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5015 {
5016 /* Only permit "=value" in the literal load instructions.
5017 The literal will be generated by programmer_friendly_fixup. */
5018 set_syntax_error (_("invalid use of \"=immediate\""));
5019 goto failure;
5020 }
5021 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5022 {
5023 set_syntax_error (_("unrecognized relocation suffix"));
5024 goto failure;
5025 }
5026 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5027 {
5028 info->imm.value = inst.reloc.exp.X_add_number;
5029 inst.reloc.type = BFD_RELOC_UNUSED;
5030 }
5031 else
5032 {
5033 info->imm.value = 0;
5034 if (inst.reloc.type == BFD_RELOC_UNUSED)
5035 switch (opcode->iclass)
5036 {
5037 case compbranch:
5038 case condbranch:
5039 /* e.g. CBZ or B.COND */
5040 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5041 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5042 break;
5043 case testbranch:
5044 /* e.g. TBZ */
5045 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5046 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5047 break;
5048 case branch_imm:
5049 /* e.g. B or BL */
5050 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5051 inst.reloc.type =
5052 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5053 : BFD_RELOC_AARCH64_JUMP26;
5054 break;
5055 case loadlit:
5056 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5057 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5058 break;
5059 case pcreladdr:
5060 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5061 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5062 break;
5063 default:
5064 gas_assert (0);
5065 abort ();
5066 }
5067 inst.reloc.pc_rel = 1;
5068 }
5069 break;
5070
5071 case AARCH64_OPND_ADDR_SIMPLE:
5072 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5073 /* [<Xn|SP>{, #<simm>}] */
5074 po_char_or_fail ('[');
5075 po_reg_or_fail (REG_TYPE_R64_SP);
5076 /* Accept optional ", #0". */
5077 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5078 && skip_past_char (&str, ','))
5079 {
5080 skip_past_char (&str, '#');
5081 if (! skip_past_char (&str, '0'))
5082 {
5083 set_fatal_syntax_error
5084 (_("the optional immediate offset can only be 0"));
5085 goto failure;
5086 }
5087 }
5088 po_char_or_fail (']');
5089 info->addr.base_regno = val;
5090 break;
5091
5092 case AARCH64_OPND_ADDR_REGOFF:
5093 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5094 po_misc_or_fail (parse_address (&str, info, 0));
5095 if (info->addr.pcrel || !info->addr.offset.is_reg
5096 || !info->addr.preind || info->addr.postind
5097 || info->addr.writeback)
5098 {
5099 set_syntax_error (_("invalid addressing mode"));
5100 goto failure;
5101 }
5102 if (!info->shifter.operator_present)
5103 {
5104 /* Default to LSL if not present. Libopcodes prefers shifter
5105 kind to be explicit. */
5106 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5107 info->shifter.kind = AARCH64_MOD_LSL;
5108 }
5109 /* Qualifier to be deduced by libopcodes. */
5110 break;
5111
5112 case AARCH64_OPND_ADDR_SIMM7:
5113 po_misc_or_fail (parse_address (&str, info, 0));
5114 if (info->addr.pcrel || info->addr.offset.is_reg
5115 || (!info->addr.preind && !info->addr.postind))
5116 {
5117 set_syntax_error (_("invalid addressing mode"));
5118 goto failure;
5119 }
5120 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5121 /* addr_off_p */ 1,
5122 /* need_libopcodes_p */ 1,
5123 /* skip_p */ 0);
5124 break;
5125
5126 case AARCH64_OPND_ADDR_SIMM9:
5127 case AARCH64_OPND_ADDR_SIMM9_2:
5128 po_misc_or_fail (parse_address_reloc (&str, info));
5129 if (info->addr.pcrel || info->addr.offset.is_reg
5130 || (!info->addr.preind && !info->addr.postind)
5131 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5132 && info->addr.writeback))
5133 {
5134 set_syntax_error (_("invalid addressing mode"));
5135 goto failure;
5136 }
5137 if (inst.reloc.type != BFD_RELOC_UNUSED)
5138 {
5139 set_syntax_error (_("relocation not allowed"));
5140 goto failure;
5141 }
5142 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5143 /* addr_off_p */ 1,
5144 /* need_libopcodes_p */ 1,
5145 /* skip_p */ 0);
5146 break;
5147
5148 case AARCH64_OPND_ADDR_UIMM12:
5149 po_misc_or_fail (parse_address_reloc (&str, info));
5150 if (info->addr.pcrel || info->addr.offset.is_reg
5151 || !info->addr.preind || info->addr.writeback)
5152 {
5153 set_syntax_error (_("invalid addressing mode"));
5154 goto failure;
5155 }
5156 if (inst.reloc.type == BFD_RELOC_UNUSED)
5157 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5158 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5159 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5160 /* Leave qualifier to be determined by libopcodes. */
5161 break;
5162
5163 case AARCH64_OPND_SIMD_ADDR_POST:
5164 /* [<Xn|SP>], <Xm|#<amount>> */
5165 po_misc_or_fail (parse_address (&str, info, 1));
5166 if (!info->addr.postind || !info->addr.writeback)
5167 {
5168 set_syntax_error (_("invalid addressing mode"));
5169 goto failure;
5170 }
5171 if (!info->addr.offset.is_reg)
5172 {
5173 if (inst.reloc.exp.X_op == O_constant)
5174 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5175 else
5176 {
5177 set_fatal_syntax_error
5178 (_("writeback value should be an immediate constant"));
5179 goto failure;
5180 }
5181 }
5182 /* No qualifier. */
5183 break;
5184
5185 case AARCH64_OPND_SYSREG:
5186 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5187 == PARSE_FAIL)
5188 {
5189 set_syntax_error (_("unknown or missing system register name"));
5190 goto failure;
5191 }
5192 inst.base.operands[i].sysreg = val;
5193 break;
5194
5195 case AARCH64_OPND_PSTATEFIELD:
5196 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5197 == PARSE_FAIL)
5198 {
5199 set_syntax_error (_("unknown or missing PSTATE field name"));
5200 goto failure;
5201 }
5202 inst.base.operands[i].pstatefield = val;
5203 break;
5204
5205 case AARCH64_OPND_SYSREG_IC:
5206 inst.base.operands[i].sysins_op =
5207 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5208 goto sys_reg_ins;
5209 case AARCH64_OPND_SYSREG_DC:
5210 inst.base.operands[i].sysins_op =
5211 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5212 goto sys_reg_ins;
5213 case AARCH64_OPND_SYSREG_AT:
5214 inst.base.operands[i].sysins_op =
5215 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5216 goto sys_reg_ins;
5217 case AARCH64_OPND_SYSREG_TLBI:
5218 inst.base.operands[i].sysins_op =
5219 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5220 sys_reg_ins:
5221 if (inst.base.operands[i].sysins_op == NULL)
5222 {
5223 set_fatal_syntax_error ( _("unknown or missing operation name"));
5224 goto failure;
5225 }
5226 break;
5227
5228 case AARCH64_OPND_BARRIER:
5229 case AARCH64_OPND_BARRIER_ISB:
5230 val = parse_barrier (&str);
5231 if (val != PARSE_FAIL
5232 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5233 {
5234 /* ISB only accepts the option name 'sy'. */
5235 set_syntax_error
5236 (_("the specified option is not accepted in ISB"));
5237 /* Turn off backtrack as this optional operand is present. */
5238 backtrack_pos = 0;
5239 goto failure;
5240 }
5241 /* This is an extension to accept a 0..15 immediate. */
5242 if (val == PARSE_FAIL)
5243 po_imm_or_fail (0, 15);
5244 info->barrier = aarch64_barrier_options + val;
5245 break;
5246
5247 case AARCH64_OPND_PRFOP:
5248 val = parse_pldop (&str);
5249 /* This is an extension to accept a 0..31 immediate. */
5250 if (val == PARSE_FAIL)
5251 po_imm_or_fail (0, 31);
5252 inst.base.operands[i].prfop = aarch64_prfops + val;
5253 break;
5254
5255 default:
5256 as_fatal (_("unhandled operand code %d"), operands[i]);
5257 }
5258
5259 /* If we get here, this operand was successfully parsed. */
5260 inst.base.operands[i].present = 1;
5261 continue;
5262
5263 failure:
5264 /* The parse routine should already have set the error, but in case
5265 not, set a default one here. */
5266 if (! error_p ())
5267 set_default_error ();
5268
5269 if (! backtrack_pos)
5270 goto parse_operands_return;
5271
5272 /* Reaching here means we are dealing with an optional operand that is
5273 omitted from the assembly line. */
5274 gas_assert (optional_operand_p (opcode, i));
5275 info->present = 0;
5276 process_omitted_operand (operands[i], opcode, i, info);
5277
5278 /* Try again, skipping the optional operand at backtrack_pos. */
5279 str = backtrack_pos;
5280 backtrack_pos = 0;
5281
5282 /* If this is the last operand, it is optional and omitted, and yet a
5283 comma was consumed before it, report the spurious comma. */
5284 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5285 {
5286 set_fatal_syntax_error
5287 (_("unexpected comma before the omitted optional operand"));
5288 goto parse_operands_return;
5289 }
5290
5291 /* Clear any error record after the omitted optional operand has been
5292 successfully handled. */
5293 clear_error ();
5294 }
5295
5296 /* Check if we have parsed all the operands. */
5297 if (*str != '\0' && ! error_p ())
5298 {
5299 /* Set I to the index of the last present operand; this is
5300 for the purpose of diagnostics. */
5301 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5302 ;
5303 set_fatal_syntax_error
5304 (_("unexpected characters following instruction"));
5305 }
5306
5307 parse_operands_return:
5308
5309 if (error_p ())
5310 {
5311 DEBUG_TRACE ("parsing FAIL: %s - %s",
5312 operand_mismatch_kind_names[get_error_kind ()],
5313 get_error_message ());
5314 /* Record the operand error properly; this is useful when there
5315 are multiple instruction templates for a mnemonic name, so that
5316 later on, we can select the error that most closely describes
5317 the problem. */
5318 record_operand_error (opcode, i, get_error_kind (),
5319 get_error_message ());
5320 return FALSE;
5321 }
5322 else
5323 {
5324 DEBUG_TRACE ("parsing SUCCESS");
5325 return TRUE;
5326 }
5327 }
5328
5329 /* Carry out some fix-ups to provide programmer-friendly features while
5330 keeping libopcodes happy, i.e. libopcodes only accepts
5331 the preferred architectural syntax.
5332 Return FALSE if there is any failure; otherwise return TRUE. */
5333
5334 static bfd_boolean
5335 programmer_friendly_fixup (aarch64_instruction *instr)
5336 {
5337 aarch64_inst *base = &instr->base;
5338 const aarch64_opcode *opcode = base->opcode;
5339 enum aarch64_op op = opcode->op;
5340 aarch64_opnd_info *operands = base->operands;
5341
5342 DEBUG_TRACE ("enter");
5343
5344 switch (opcode->iclass)
5345 {
5346 case testbranch:
5347 /* TBNZ Xn|Wn, #uimm6, label
5348 Test and Branch Not Zero: conditionally jumps to label if bit number
5349 uimm6 in register Xn is not zero. The bit number implies the width of
5350 the register, which may be written and should be disassembled as Wn if
5351 uimm is less than 32. */
5352 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5353 {
5354 if (operands[1].imm.value >= 32)
5355 {
5356 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5357 0, 31);
5358 return FALSE;
5359 }
5360 operands[0].qualifier = AARCH64_OPND_QLF_X;
5361 }
5362 break;
5363 case loadlit:
5364 /* LDR Wt, label | =value
5365 As a convenience assemblers will typically permit the notation
5366 "=value" in conjunction with the pc-relative literal load instructions
5367 to automatically place an immediate value or symbolic address in a
5368 nearby literal pool and generate a hidden label which references it.
5369 ISREG has been set to 0 in the case of =value. */
5370 if (instr->gen_lit_pool
5371 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5372 {
5373 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5374 if (op == OP_LDRSW_LIT)
5375 size = 4;
5376 if (instr->reloc.exp.X_op != O_constant
5377 && instr->reloc.exp.X_op != O_big
5378 && instr->reloc.exp.X_op != O_symbol)
5379 {
5380 record_operand_error (opcode, 1,
5381 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5382 _("constant expression expected"));
5383 return FALSE;
5384 }
5385 if (! add_to_lit_pool (&instr->reloc.exp, size))
5386 {
5387 record_operand_error (opcode, 1,
5388 AARCH64_OPDE_OTHER_ERROR,
5389 _("literal pool insertion failed"));
5390 return FALSE;
5391 }
5392 }
5393 break;
5394 case log_shift:
5395 case bitfield:
5396 /* UXT[BHW] Wd, Wn
5397 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5398 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5399 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5400 A programmer-friendly assembler should accept a destination Xd in
5401 place of Wd, however that is not the preferred form for disassembly.
5402 */
5403 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5404 && operands[1].qualifier == AARCH64_OPND_QLF_W
5405 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5406 operands[0].qualifier = AARCH64_OPND_QLF_W;
5407 break;
5408
5409 case addsub_ext:
5410 {
5411 /* In the 64-bit form, the final register operand is written as Wm
5412 for all but the (possibly omitted) UXTX/LSL and SXTX
5413 operators.
5414 As a programmer-friendly assembler, we accept e.g.
5415 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5416 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5417 int idx = aarch64_operand_index (opcode->operands,
5418 AARCH64_OPND_Rm_EXT);
5419 gas_assert (idx == 1 || idx == 2);
5420 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5421 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5422 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5423 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5424 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5425 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5426 }
5427 break;
5428
5429 default:
5430 break;
5431 }
5432
5433 DEBUG_TRACE ("exit with SUCCESS");
5434 return TRUE;
5435 }
5436
5437 /* A wrapper function to interface with libopcodes on encoding and
5438 record the error message if there is any.
5439
5440 Return TRUE on success; otherwise return FALSE. */
5441
5442 static bfd_boolean
5443 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5444 aarch64_insn *code)
5445 {
5446 aarch64_operand_error error_info;
5447 error_info.kind = AARCH64_OPDE_NIL;
5448 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5449 return TRUE;
5450 else
5451 {
5452 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5453 record_operand_error_info (opcode, &error_info);
5454 return FALSE;
5455 }
5456 }
5457
5458 #ifdef DEBUG_AARCH64
5459 static inline void
5460 dump_opcode_operands (const aarch64_opcode *opcode)
5461 {
5462 int i = 0;
5463 while (opcode->operands[i] != AARCH64_OPND_NIL)
5464 {
5465 aarch64_verbose ("\t\t opnd%d: %s", i,
5466 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5467 ? aarch64_get_operand_name (opcode->operands[i])
5468 : aarch64_get_operand_desc (opcode->operands[i]));
5469 ++i;
5470 }
5471 }
5472 #endif /* DEBUG_AARCH64 */
5473
5474 /* This is the guts of the machine-dependent assembler. STR points to a
5475 machine dependent instruction. This function is supposed to emit
5476 the frags/bytes it assembles to. */
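/* The overall flow is: look up every template sharing the mnemonic, then
   for each one run parse_operands, programmer_friendly_fixup and do_encode;
   the first template that succeeds is emitted via output_inst, otherwise
   the operand errors collected along the way are reported by
   output_operand_error_report.  */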
5477
5478 void
5479 md_assemble (char *str)
5480 {
5481 char *p = str;
5482 templates *template;
5483 aarch64_opcode *opcode;
5484 aarch64_inst *inst_base;
5485 unsigned saved_cond;
5486
5487 /* Align the previous label if needed. */
5488 if (last_label_seen != NULL)
5489 {
5490 symbol_set_frag (last_label_seen, frag_now);
5491 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5492 S_SET_SEGMENT (last_label_seen, now_seg);
5493 }
5494
5495 inst.reloc.type = BFD_RELOC_UNUSED;
5496
5497 DEBUG_TRACE ("\n\n");
5498 DEBUG_TRACE ("==============================");
5499 DEBUG_TRACE ("Enter md_assemble with %s", str);
5500
5501 template = opcode_lookup (&p);
5502 if (!template)
5503 {
5504 /* It wasn't an instruction, but it might be a register alias
5505 created by an "alias .req reg" directive. */
5506 if (!create_register_alias (str, p))
5507 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5508 str);
5509 return;
5510 }
5511
5512 skip_whitespace (p);
5513 if (*p == ',')
5514 {
5515 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5516 get_mnemonic_name (str), str);
5517 return;
5518 }
5519
5520 init_operand_error_report ();
5521
5522 saved_cond = inst.cond;
5523 reset_aarch64_instruction (&inst);
5524 inst.cond = saved_cond;
5525
5526 /* Iterate through all opcode entries with the same mnemonic name. */
5527 do
5528 {
5529 opcode = template->opcode;
5530
5531 DEBUG_TRACE ("opcode %s found", opcode->name);
5532 #ifdef DEBUG_AARCH64
5533 if (debug_dump)
5534 dump_opcode_operands (opcode);
5535 #endif /* DEBUG_AARCH64 */
5536
5537 /* Check that this instruction is supported for this CPU. */
5538 if (!opcode->avariant
5539 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5540 {
5541 as_bad (_("selected processor does not support `%s'"), str);
5542 return;
5543 }
5544
5545 mapping_state (MAP_INSN);
5546
5547 inst_base = &inst.base;
5548 inst_base->opcode = opcode;
5549
5550 /* Truly conditionally executed instructions, e.g. b.cond. */
5551 if (opcode->flags & F_COND)
5552 {
5553 gas_assert (inst.cond != COND_ALWAYS);
5554 inst_base->cond = get_cond_from_value (inst.cond);
5555 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5556 }
5557 else if (inst.cond != COND_ALWAYS)
5558 {
5559 /* We shouldn't get here: the assembly looks like a conditional
5560 instruction but the opcode found is unconditional. */
5561 gas_assert (0);
5562 continue;
5563 }
5564
5565 if (parse_operands (p, opcode)
5566 && programmer_friendly_fixup (&inst)
5567 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5568 {
5569 if (inst.reloc.type == BFD_RELOC_UNUSED
5570 || !inst.reloc.need_libopcodes_p)
5571 output_inst (NULL);
5572 else
5573 {
5574 /* If there is relocation generated for the instruction,
5575 store the instruction information for the future fix-up. */
5576 struct aarch64_inst *copy;
5577 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5578 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5579 abort ();
5580 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5581 output_inst (copy);
5582 }
5583 return;
5584 }
5585
5586 template = template->next;
5587 if (template != NULL)
5588 {
5589 reset_aarch64_instruction (&inst);
5590 inst.cond = saved_cond;
5591 }
5592 }
5593 while (template != NULL);
5594
5595 /* Issue the error messages if any. */
5596 output_operand_error_report (str);
5597 }
5598
5599 /* Various frobbings of labels and their addresses. */
5600
5601 void
5602 aarch64_start_line_hook (void)
5603 {
5604 last_label_seen = NULL;
5605 }
5606
5607 void
5608 aarch64_frob_label (symbolS * sym)
5609 {
5610 last_label_seen = sym;
5611
5612 dwarf2_emit_label (sym);
5613 }
5614
5615 int
5616 aarch64_data_in_code (void)
5617 {
5618 if (!strncmp (input_line_pointer + 1, "data:", 5))
5619 {
5620 *input_line_pointer = '/';
5621 input_line_pointer += 5;
5622 *input_line_pointer = 0;
5623 return 1;
5624 }
5625
5626 return 0;
5627 }
5628
5629 char *
5630 aarch64_canonicalize_symbol_name (char *name)
5631 {
5632 int len;
5633
5634 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5635 *(name + len - 5) = 0;
5636
5637 return name;
5638 }
5639 \f
5640 /* Table of all register names defined by default. The user can
5641 define additional names with .req. Note that all register names
5642 should appear in both upper and lowercase variants. Some registers
5643 also have mixed-case names. */
5644
5645 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5646 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5647 #define REGSET31(p,t) \
5648 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5649 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5650 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5651 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5652 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5653 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5654 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5655 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5656 #define REGSET(p,t) \
5657 REGSET31(p,t), REGNUM(p,31,t)
5658
5659 /* These go into aarch64_reg_hsh hash-table. */
5660 static const reg_entry reg_names[] = {
5661 /* Integer registers. */
5662 REGSET31 (x, R_64), REGSET31 (X, R_64),
5663 REGSET31 (w, R_32), REGSET31 (W, R_32),
5664
5665 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5666 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5667
5668 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5669 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5670
5671 /* Coprocessor register numbers. */
5672 REGSET (c, CN), REGSET (C, CN),
5673
5674 /* Floating-point single precision registers. */
5675 REGSET (s, FP_S), REGSET (S, FP_S),
5676
5677 /* Floating-point double precision registers. */
5678 REGSET (d, FP_D), REGSET (D, FP_D),
5679
5680 /* Floating-point half precision registers. */
5681 REGSET (h, FP_H), REGSET (H, FP_H),
5682
5683 /* Floating-point byte precision registers. */
5684 REGSET (b, FP_B), REGSET (B, FP_B),
5685
5686 /* Floating-point quad precision registers. */
5687 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5688
5689 /* FP/SIMD registers. */
5690 REGSET (v, VN), REGSET (V, VN),
5691 };
5692
5693 #undef REGDEF
5694 #undef REGNUM
5695 #undef REGSET
5696
5697 #define N 1
5698 #define n 0
5699 #define Z 1
5700 #define z 0
5701 #define C 1
5702 #define c 0
5703 #define V 1
5704 #define v 0
5705 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5706 static const asm_nzcv nzcv_names[] = {
5707 {"nzcv", B (n, z, c, v)},
5708 {"nzcV", B (n, z, c, V)},
5709 {"nzCv", B (n, z, C, v)},
5710 {"nzCV", B (n, z, C, V)},
5711 {"nZcv", B (n, Z, c, v)},
5712 {"nZcV", B (n, Z, c, V)},
5713 {"nZCv", B (n, Z, C, v)},
5714 {"nZCV", B (n, Z, C, V)},
5715 {"Nzcv", B (N, z, c, v)},
5716 {"NzcV", B (N, z, c, V)},
5717 {"NzCv", B (N, z, C, v)},
5718 {"NzCV", B (N, z, C, V)},
5719 {"NZcv", B (N, Z, c, v)},
5720 {"NZcV", B (N, Z, c, V)},
5721 {"NZCv", B (N, Z, C, v)},
5722 {"NZCV", B (N, Z, C, V)}
5723 };
5724
5725 #undef N
5726 #undef n
5727 #undef Z
5728 #undef z
5729 #undef C
5730 #undef c
5731 #undef V
5732 #undef v
5733 #undef B
5734 \f
5735 /* MD interface: bits in the object file. */
5736
5737 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5738 for use in the a.out file, and stores them in the array pointed to by buf.
5739 This knows about the endian-ness of the target machine and does
5740 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
5741 2 (short) and 4 (long). Floating numbers are put out as a series of
5742 LITTLENUMS (shorts, here at least). */
5743
5744 void
5745 md_number_to_chars (char *buf, valueT val, int n)
5746 {
5747 if (target_big_endian)
5748 number_to_chars_bigendian (buf, val, n);
5749 else
5750 number_to_chars_littleendian (buf, val, n);
5751 }
5752
5753 /* MD interface: Sections. */
5754
5755 /* Estimate the size of a frag before relaxing. Assume everything fits in
5756 4 bytes. */
5757
5758 int
5759 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5760 {
5761 fragp->fr_var = 4;
5762 return 4;
5763 }
5764
5765 /* Round up a section size to the appropriate boundary. */
5766
5767 valueT
5768 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5769 {
5770 return size;
5771 }
5772
5773 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5774 of an rs_align_code fragment. */
5775
5776 void
5777 aarch64_handle_align (fragS * fragP)
5778 {
5779 /* NOP = d503201f */
5780 /* AArch64 instructions are always little-endian. */
5781 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5782
5783 int bytes, fix, noop_size;
5784 char *p;
5785 const char *noop;
5786
5787 if (fragP->fr_type != rs_align_code)
5788 return;
5789
5790 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5791 p = fragP->fr_literal + fragP->fr_fix;
5792 fix = 0;
5793
5794 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5795 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5796
5797 #ifdef OBJ_ELF
5798 gas_assert (fragP->tc_frag_data.recorded);
5799 #endif
5800
5801 noop = aarch64_noop;
5802 noop_size = sizeof (aarch64_noop);
5803 fragP->fr_var = noop_size;
5804
5805 if (bytes & (noop_size - 1))
5806 {
5807 fix = bytes & (noop_size - 1);
5808 #ifdef OBJ_ELF
5809 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5810 #endif
5811 memset (p, 0, fix);
5812 p += fix;
5813 bytes -= fix;
5814 }
5815
5816 while (bytes >= noop_size)
5817 {
5818 memcpy (p, noop, noop_size);
5819 p += noop_size;
5820 bytes -= noop_size;
5821 fix += noop_size;
5822 }
5823
5824 fragP->fr_fix += fix;
5825 }
5826
5827 /* Called from md_do_align. Used to create an alignment
5828 frag in a code section. */
5829
5830 void
5831 aarch64_frag_align_code (int n, int max)
5832 {
5833 char *p;
5834
5835 /* We assume that there will never be a requirement
5836 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
5837 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5838 as_fatal (_
5839 ("alignments greater than %d bytes not supported in .text sections"),
5840 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5841
5842 p = frag_var (rs_align_code,
5843 MAX_MEM_FOR_RS_ALIGN_CODE,
5844 1,
5845 (relax_substateT) max,
5846 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5847 *p = 0;
5848 }
5849
5850 /* Perform target specific initialisation of a frag.
5851 Note - despite the name this initialisation is not done when the frag
5852 is created, but only when its type is assigned. A frag can be created
5853 and used a long time before its type is set, so beware of assuming that
5854 this initialisation is performed first. */
5855
5856 #ifndef OBJ_ELF
5857 void
5858 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5859 int max_chars ATTRIBUTE_UNUSED)
5860 {
5861 }
5862
5863 #else /* OBJ_ELF is defined. */
5864 void
5865 aarch64_init_frag (fragS * fragP, int max_chars)
5866 {
5867 /* Record a mapping symbol for alignment frags. We will delete this
5868 later if the alignment ends up empty. */
5869 if (!fragP->tc_frag_data.recorded)
5870 {
5871 fragP->tc_frag_data.recorded = 1;
5872 switch (fragP->fr_type)
5873 {
5874 case rs_align:
5875 case rs_align_test:
5876 case rs_fill:
5877 mapping_state_2 (MAP_DATA, max_chars);
5878 break;
5879 case rs_align_code:
5880 mapping_state_2 (MAP_INSN, max_chars);
5881 break;
5882 default:
5883 break;
5884 }
5885 }
5886 }
5887 \f
5888 /* Initialize the DWARF-2 unwind information for this procedure. */
5889
5890 void
5891 tc_aarch64_frame_initial_instructions (void)
5892 {
5893 cfi_add_CFA_def_cfa (REG_SP, 0);
5894 }
5895 #endif /* OBJ_ELF */
5896
5897 /* Convert REGNAME to a DWARF-2 register number. */
5898
5899 int
5900 tc_aarch64_regname_to_dw2regnum (char *regname)
5901 {
5902 const reg_entry *reg = parse_reg (&regname);
5903 if (reg == NULL)
5904 return -1;
5905
5906 switch (reg->type)
5907 {
5908 case REG_TYPE_SP_32:
5909 case REG_TYPE_SP_64:
5910 case REG_TYPE_R_32:
5911 case REG_TYPE_R_64:
5912 case REG_TYPE_FP_B:
5913 case REG_TYPE_FP_H:
5914 case REG_TYPE_FP_S:
5915 case REG_TYPE_FP_D:
5916 case REG_TYPE_FP_Q:
5917 return reg->number;
5918 default:
5919 break;
5920 }
5921 return -1;
5922 }
5923
5924 /* Implement DWARF2_ADDR_SIZE. */
5925
5926 int
5927 aarch64_dwarf2_addr_size (void)
5928 {
5929 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5930 if (ilp32_p)
5931 return 4;
5932 #endif
5933 return bfd_arch_bits_per_address (stdoutput) / 8;
5934 }
5935
5936 /* MD interface: Symbol and relocation handling. */
5937
5938 /* Return the address within the segment that a PC-relative fixup is
5939 relative to. For AArch64 PC-relative fixups applied to instructions
5940 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
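/* Illustrative note (assumption, see tc-aarch64.h): AARCH64_PCREL_OFFSET is
   expected to be 0 on AArch64, i.e. there is no pipeline compensation, so the
   result below is normally just the fixup's address within the segment, or 0
   when the relocation is left for the linker to resolve.  */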
5941
5942 long
5943 md_pcrel_from_section (fixS * fixP, segT seg)
5944 {
5945 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5946
5947 /* If this is pc-relative and we are going to emit a relocation
5948 then we just want to put out any pipeline compensation that the linker
5949 will need. Otherwise we want to use the calculated base. */
5950 if (fixP->fx_pcrel
5951 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5952 || aarch64_force_relocation (fixP)))
5953 base = 0;
5954
5955 /* AArch64 should be consistent for all pc-relative relocations. */
5956 return base + AARCH64_PCREL_OFFSET;
5957 }
5958
5959 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
5960 Otherwise we have no need to default values of symbols. */
5961
5962 symbolS *
5963 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5964 {
5965 #ifdef OBJ_ELF
5966 if (name[0] == '_' && name[1] == 'G'
5967 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5968 {
5969 if (!GOT_symbol)
5970 {
5971 if (symbol_find (name))
5972 as_bad (_("GOT already in the symbol table"));
5973
5974 GOT_symbol = symbol_new (name, undefined_section,
5975 (valueT) 0, &zero_address_frag);
5976 }
5977
5978 return GOT_symbol;
5979 }
5980 #endif
5981
5982 return 0;
5983 }
5984
5985 /* Return non-zero if the indicated VALUE has overflowed the maximum
5986 range expressible by an unsigned number with the indicated number of
5987 BITS. */
5988
5989 static bfd_boolean
5990 unsigned_overflow (valueT value, unsigned bits)
5991 {
5992 valueT lim;
5993 if (bits >= sizeof (valueT) * 8)
5994 return FALSE;
5995 lim = (valueT) 1 << bits;
5996 return (value >= lim);
5997 }
5998
5999
6000 /* Return non-zero if the indicated VALUE has overflowed the maximum
6001 range expressible by a signed number with the indicated number of
6002 BITS. */
6003
6004 static bfd_boolean
6005 signed_overflow (offsetT value, unsigned bits)
6006 {
6007 offsetT lim;
6008 if (bits >= sizeof (offsetT) * 8)
6009 return FALSE;
6010 lim = (offsetT) 1 << (bits - 1);
6011 return (value < -lim || value >= lim);
6012 }
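/* Worked example (illustrative): with BITS == 12 the signed range is
   [-2048, 2047], so signed_overflow (2048, 12) and signed_overflow (-2049, 12)
   both return TRUE, while unsigned_overflow (4095, 12) returns FALSE because
   the unsigned limit is 4096.  */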
6013
6014 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6015 unsigned immediate offset load/store instruction, try to encode it as
6016 an unscaled, 9-bit, signed immediate offset load/store instruction.
6017 Return TRUE if it is successful; otherwise return FALSE.
6018
6019 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6020 in response to the standard LDR/STR mnemonics when the immediate offset is
6021 unambiguous, i.e. when it is negative or unaligned. */
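/* Illustrative example (hypothetical assembly input, not from this file):
   "ldr w1, [x0, #-4]" cannot use the scaled, unsigned 12-bit offset form
   because the offset is negative, so it is re-encoded as the unscaled form
   "ldur w1, [x0, #-4]", which takes a 9-bit signed immediate.  */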
6022
6023 static bfd_boolean
6024 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6025 {
6026 int idx;
6027 enum aarch64_op new_op;
6028 const aarch64_opcode *new_opcode;
6029
6030 gas_assert (instr->opcode->iclass == ldst_pos);
6031
6032 switch (instr->opcode->op)
6033 {
6034 case OP_LDRB_POS: new_op = OP_LDURB; break;
6035 case OP_STRB_POS: new_op = OP_STURB; break;
6036 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6037 case OP_LDRH_POS: new_op = OP_LDURH; break;
6038 case OP_STRH_POS: new_op = OP_STURH; break;
6039 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6040 case OP_LDR_POS: new_op = OP_LDUR; break;
6041 case OP_STR_POS: new_op = OP_STUR; break;
6042 case OP_LDRF_POS: new_op = OP_LDURV; break;
6043 case OP_STRF_POS: new_op = OP_STURV; break;
6044 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6045 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6046 default: new_op = OP_NIL; break;
6047 }
6048
6049 if (new_op == OP_NIL)
6050 return FALSE;
6051
6052 new_opcode = aarch64_get_opcode (new_op);
6053 gas_assert (new_opcode != NULL);
6054
6055 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6056 instr->opcode->op, new_opcode->op);
6057
6058 aarch64_replace_opcode (instr, new_opcode);
6059
6060 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6061 qualifier matching may fail because the out-of-date qualifier will
6062 prevent the operand being updated with a new and correct qualifier. */
6063 idx = aarch64_operand_index (instr->opcode->operands,
6064 AARCH64_OPND_ADDR_SIMM9);
6065 gas_assert (idx == 1);
6066 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6067
6068 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6069
6070 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6071 return FALSE;
6072
6073 return TRUE;
6074 }
6075
6076 /* Called by fix_insn to fix a MOV immediate alias instruction.
6077
6078 Operand for a generic move immediate instruction, which is an alias
6079 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6080 a 32-bit/64-bit immediate value into a general register. An assembler error
6081 shall result if the immediate cannot be created by a single one of these
6082 instructions. If there is a choice, then to ensure reversibility an
6083 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
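/* Illustrative examples (hypothetical immediates, not from the sources):
     mov x0, #0x10000     ->  movz x0, #0x1, lsl #16
     mov x0, #-1          ->  movn x0, #0x0
     mov x0, #0x00ffff00  ->  orr  x0, xzr, #0xffff00
   A value such as 0x123456789, which none of these forms can express, leads
   to the "immediate cannot be moved by a single instruction" error below.  */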
6084
6085 static void
6086 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6087 {
6088 const aarch64_opcode *opcode;
6089
6090 /* Need to check if the destination is SP/ZR. The check has to be done
6091 before any aarch64_replace_opcode. */
6092 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6093 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6094
6095 instr->operands[1].imm.value = value;
6096 instr->operands[1].skip = 0;
6097
6098 if (try_mov_wide_p)
6099 {
6100 /* Try the MOVZ alias. */
6101 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6102 aarch64_replace_opcode (instr, opcode);
6103 if (aarch64_opcode_encode (instr->opcode, instr,
6104 &instr->value, NULL, NULL))
6105 {
6106 put_aarch64_insn (buf, instr->value);
6107 return;
6108 }
6109 /* Try the MOVN (inverted wide immediate) alias. */
6110 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6111 aarch64_replace_opcode (instr, opcode);
6112 if (aarch64_opcode_encode (instr->opcode, instr,
6113 &instr->value, NULL, NULL))
6114 {
6115 put_aarch64_insn (buf, instr->value);
6116 return;
6117 }
6118 }
6119
6120 if (try_mov_bitmask_p)
6121 {
6122 /* Try the ORR alias. */
6123 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6124 aarch64_replace_opcode (instr, opcode);
6125 if (aarch64_opcode_encode (instr->opcode, instr,
6126 &instr->value, NULL, NULL))
6127 {
6128 put_aarch64_insn (buf, instr->value);
6129 return;
6130 }
6131 }
6132
6133 as_bad_where (fixP->fx_file, fixP->fx_line,
6134 _("immediate cannot be moved by a single instruction"));
6135 }
6136
6137 /* An instruction operand which is immediate-related may have a symbol used
6138 in the assembly, e.g.
6139
6140 mov w0, u32
6141 .set u32, 0x00ffff00
6142
6143 At the time when the assembly instruction is parsed, a referenced symbol,
6144 like 'u32' in the above example, may not have been seen; a fixS is created
6145 in such a case and is handled here after symbols have been resolved.
6146 The instruction is fixed up with VALUE using the information in *FIXP plus
6147 extra information in FLAGS.
6148
6149 This function is called by md_apply_fix to fix up instructions that need
6150 a fix-up described above but does not involve any linker-time relocation. */
6151
6152 static void
6153 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6154 {
6155 int idx;
6156 uint32_t insn;
6157 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6158 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6159 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6160
6161 if (new_inst)
6162 {
6163 /* Now the instruction is about to be fixed-up, so the operand that
6164 was previously marked as 'ignored' needs to be unmarked in order
6165 to get the encoding done properly. */
6166 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6167 new_inst->operands[idx].skip = 0;
6168 }
6169
6170 gas_assert (opnd != AARCH64_OPND_NIL);
6171
6172 switch (opnd)
6173 {
6174 case AARCH64_OPND_EXCEPTION:
6175 if (unsigned_overflow (value, 16))
6176 as_bad_where (fixP->fx_file, fixP->fx_line,
6177 _("immediate out of range"));
6178 insn = get_aarch64_insn (buf);
6179 insn |= encode_svc_imm (value);
6180 put_aarch64_insn (buf, insn);
6181 break;
6182
6183 case AARCH64_OPND_AIMM:
6184 /* ADD or SUB with immediate.
6185 NOTE this assumes we come here with an add/sub shifted reg encoding
6186 3 322|2222|2 2 2 21111 111111
6187 1 098|7654|3 2 1 09876 543210 98765 43210
6188 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6189 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6190 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6191 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6192 ->
6193 3 322|2222|2 2 221111111111
6194 1 098|7654|3 2 109876543210 98765 43210
6195 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6196 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6197 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6198 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6199 Fields sf Rn Rd are already set. */
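/* Illustrative (hypothetical resolved value): 0x1000 does not fit in the
   12-bit immediate directly, but (0x1000 >> 12) == 0x1 does, so the value is
   shifted and the LSL #12 flag is set, e.g.
     add x0, x1, #0x1000  ->  add x0, x1, #0x1, lsl #12  */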
6200 insn = get_aarch64_insn (buf);
6201 if (value < 0)
6202 {
6203 /* Add <-> sub. */
6204 insn = reencode_addsub_switch_add_sub (insn);
6205 value = -value;
6206 }
6207
6208 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6209 && unsigned_overflow (value, 12))
6210 {
6211 /* Try to shift the value by 12 to make it fit. */
6212 if (((value >> 12) << 12) == value
6213 && ! unsigned_overflow (value, 12 + 12))
6214 {
6215 value >>= 12;
6216 insn |= encode_addsub_imm_shift_amount (1);
6217 }
6218 }
6219
6220 if (unsigned_overflow (value, 12))
6221 as_bad_where (fixP->fx_file, fixP->fx_line,
6222 _("immediate out of range"));
6223
6224 insn |= encode_addsub_imm (value);
6225
6226 put_aarch64_insn (buf, insn);
6227 break;
6228
6229 case AARCH64_OPND_SIMD_IMM:
6230 case AARCH64_OPND_SIMD_IMM_SFT:
6231 case AARCH64_OPND_LIMM:
6232 /* Bit mask immediate. */
6233 gas_assert (new_inst != NULL);
6234 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6235 new_inst->operands[idx].imm.value = value;
6236 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6237 &new_inst->value, NULL, NULL))
6238 put_aarch64_insn (buf, new_inst->value);
6239 else
6240 as_bad_where (fixP->fx_file, fixP->fx_line,
6241 _("invalid immediate"));
6242 break;
6243
6244 case AARCH64_OPND_HALF:
6245 /* 16-bit unsigned immediate. */
6246 if (unsigned_overflow (value, 16))
6247 as_bad_where (fixP->fx_file, fixP->fx_line,
6248 _("immediate out of range"));
6249 insn = get_aarch64_insn (buf);
6250 insn |= encode_movw_imm (value & 0xffff);
6251 put_aarch64_insn (buf, insn);
6252 break;
6253
6254 case AARCH64_OPND_IMM_MOV:
6255 /* Operand for a generic move immediate instruction, which is
6256 an alias instruction that generates a single MOVZ, MOVN or ORR
6257 instruction to loads a 32-bit/64-bit immediate value into general
6258 register. An assembler error shall result if the immediate cannot be
6259 created by a single one of these instructions. If there is a choice,
6260 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
6261 and MOVZ or MOVN to ORR. */
6262 gas_assert (new_inst != NULL);
6263 fix_mov_imm_insn (fixP, buf, new_inst, value);
6264 break;
6265
6266 case AARCH64_OPND_ADDR_SIMM7:
6267 case AARCH64_OPND_ADDR_SIMM9:
6268 case AARCH64_OPND_ADDR_SIMM9_2:
6269 case AARCH64_OPND_ADDR_UIMM12:
6270 /* Immediate offset in an address. */
6271 insn = get_aarch64_insn (buf);
6272
6273 gas_assert (new_inst != NULL && new_inst->value == insn);
6274 gas_assert (new_inst->opcode->operands[1] == opnd
6275 || new_inst->opcode->operands[2] == opnd);
6276
6277 /* Get the index of the address operand. */
6278 if (new_inst->opcode->operands[1] == opnd)
6279 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6280 idx = 1;
6281 else
6282 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6283 idx = 2;
6284
6285 /* Update the resolved offset value. */
6286 new_inst->operands[idx].addr.offset.imm = value;
6287
6288 /* Encode/fix-up. */
6289 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6290 &new_inst->value, NULL, NULL))
6291 {
6292 put_aarch64_insn (buf, new_inst->value);
6293 break;
6294 }
6295 else if (new_inst->opcode->iclass == ldst_pos
6296 && try_to_encode_as_unscaled_ldst (new_inst))
6297 {
6298 put_aarch64_insn (buf, new_inst->value);
6299 break;
6300 }
6301
6302 as_bad_where (fixP->fx_file, fixP->fx_line,
6303 _("immediate offset out of range"));
6304 break;
6305
6306 default:
6307 gas_assert (0);
6308 as_fatal (_("unhandled operand code %d"), opnd);
6309 }
6310 }
6311
6312 /* Apply a fixup (fixP) to segment data, once it has been determined
6313 by our caller that we have all the info we need to fix it up.
6314
6315 Parameter valP is the pointer to the value of the bits. */
6316
6317 void
6318 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6319 {
6320 offsetT value = *valP;
6321 uint32_t insn;
6322 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6323 int scale;
6324 unsigned flags = fixP->fx_addnumber;
6325
6326 DEBUG_TRACE ("\n\n");
6327 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6328 DEBUG_TRACE ("Enter md_apply_fix");
6329
6330 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6331
6332 /* Note whether this will delete the relocation. */
6333
6334 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6335 fixP->fx_done = 1;
6336
6337 /* Process the relocations. */
6338 switch (fixP->fx_r_type)
6339 {
6340 case BFD_RELOC_NONE:
6341 /* This will need to go in the object file. */
6342 fixP->fx_done = 0;
6343 break;
6344
6345 case BFD_RELOC_8:
6346 case BFD_RELOC_8_PCREL:
6347 if (fixP->fx_done || !seg->use_rela_p)
6348 md_number_to_chars (buf, value, 1);
6349 break;
6350
6351 case BFD_RELOC_16:
6352 case BFD_RELOC_16_PCREL:
6353 if (fixP->fx_done || !seg->use_rela_p)
6354 md_number_to_chars (buf, value, 2);
6355 break;
6356
6357 case BFD_RELOC_32:
6358 case BFD_RELOC_32_PCREL:
6359 if (fixP->fx_done || !seg->use_rela_p)
6360 md_number_to_chars (buf, value, 4);
6361 break;
6362
6363 case BFD_RELOC_64:
6364 case BFD_RELOC_64_PCREL:
6365 if (fixP->fx_done || !seg->use_rela_p)
6366 md_number_to_chars (buf, value, 8);
6367 break;
6368
6369 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6370 /* We claim that these fixups have been processed here, even if
6371 in fact we generate an error because we do not have a reloc
6372 for them, so tc_gen_reloc() will reject them. */
6373 fixP->fx_done = 1;
6374 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6375 {
6376 as_bad_where (fixP->fx_file, fixP->fx_line,
6377 _("undefined symbol %s used as an immediate value"),
6378 S_GET_NAME (fixP->fx_addsy));
6379 goto apply_fix_return;
6380 }
6381 fix_insn (fixP, flags, value);
6382 break;
6383
6384 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6385 if (fixP->fx_done || !seg->use_rela_p)
6386 {
6387 if (value & 3)
6388 as_bad_where (fixP->fx_file, fixP->fx_line,
6389 _("pc-relative load offset not word aligned"));
6390 if (signed_overflow (value, 21))
6391 as_bad_where (fixP->fx_file, fixP->fx_line,
6392 _("pc-relative load offset out of range"));
6393 insn = get_aarch64_insn (buf);
6394 insn |= encode_ld_lit_ofs_19 (value >> 2);
6395 put_aarch64_insn (buf, insn);
6396 }
6397 break;
6398
6399 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6400 if (fixP->fx_done || !seg->use_rela_p)
6401 {
6402 if (signed_overflow (value, 21))
6403 as_bad_where (fixP->fx_file, fixP->fx_line,
6404 _("pc-relative address offset out of range"));
6405 insn = get_aarch64_insn (buf);
6406 insn |= encode_adr_imm (value);
6407 put_aarch64_insn (buf, insn);
6408 }
6409 break;
6410
6411 case BFD_RELOC_AARCH64_BRANCH19:
6412 if (fixP->fx_done || !seg->use_rela_p)
6413 {
6414 if (value & 3)
6415 as_bad_where (fixP->fx_file, fixP->fx_line,
6416 _("conditional branch target not word aligned"));
6417 if (signed_overflow (value, 21))
6418 as_bad_where (fixP->fx_file, fixP->fx_line,
6419 _("conditional branch out of range"));
6420 insn = get_aarch64_insn (buf);
6421 insn |= encode_cond_branch_ofs_19 (value >> 2);
6422 put_aarch64_insn (buf, insn);
6423 }
6424 break;
6425
6426 case BFD_RELOC_AARCH64_TSTBR14:
6427 if (fixP->fx_done || !seg->use_rela_p)
6428 {
6429 if (value & 3)
6430 as_bad_where (fixP->fx_file, fixP->fx_line,
6431 _("conditional branch target not word aligned"));
6432 if (signed_overflow (value, 16))
6433 as_bad_where (fixP->fx_file, fixP->fx_line,
6434 _("conditional branch out of range"));
6435 insn = get_aarch64_insn (buf);
6436 insn |= encode_tst_branch_ofs_14 (value >> 2);
6437 put_aarch64_insn (buf, insn);
6438 }
6439 break;
6440
6441 case BFD_RELOC_AARCH64_JUMP26:
6442 case BFD_RELOC_AARCH64_CALL26:
6443 if (fixP->fx_done || !seg->use_rela_p)
6444 {
6445 if (value & 3)
6446 as_bad_where (fixP->fx_file, fixP->fx_line,
6447 _("branch target not word aligned"));
6448 if (signed_overflow (value, 28))
6449 as_bad_where (fixP->fx_file, fixP->fx_line,
6450 _("branch out of range"));
6451 insn = get_aarch64_insn (buf);
6452 insn |= encode_branch_ofs_26 (value >> 2);
6453 put_aarch64_insn (buf, insn);
6454 }
6455 break;
6456
6457 case BFD_RELOC_AARCH64_MOVW_G0:
6458 case BFD_RELOC_AARCH64_MOVW_G0_S:
6459 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6460 scale = 0;
6461 goto movw_common;
6462 case BFD_RELOC_AARCH64_MOVW_G1:
6463 case BFD_RELOC_AARCH64_MOVW_G1_S:
6464 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6465 scale = 16;
6466 goto movw_common;
6467 case BFD_RELOC_AARCH64_MOVW_G2:
6468 case BFD_RELOC_AARCH64_MOVW_G2_S:
6469 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6470 scale = 32;
6471 goto movw_common;
6472 case BFD_RELOC_AARCH64_MOVW_G3:
6473 scale = 48;
6474 movw_common:
6475 if (fixP->fx_done || !seg->use_rela_p)
6476 {
6477 insn = get_aarch64_insn (buf);
6478
6479 if (!fixP->fx_done)
6480 {
6481 /* REL signed addend must fit in 16 bits */
6482 if (signed_overflow (value, 16))
6483 as_bad_where (fixP->fx_file, fixP->fx_line,
6484 _("offset out of range"));
6485 }
6486 else
6487 {
6488 /* Check for overflow and scale. */
6489 switch (fixP->fx_r_type)
6490 {
6491 case BFD_RELOC_AARCH64_MOVW_G0:
6492 case BFD_RELOC_AARCH64_MOVW_G1:
6493 case BFD_RELOC_AARCH64_MOVW_G2:
6494 case BFD_RELOC_AARCH64_MOVW_G3:
6495 if (unsigned_overflow (value, scale + 16))
6496 as_bad_where (fixP->fx_file, fixP->fx_line,
6497 _("unsigned value out of range"));
6498 break;
6499 case BFD_RELOC_AARCH64_MOVW_G0_S:
6500 case BFD_RELOC_AARCH64_MOVW_G1_S:
6501 case BFD_RELOC_AARCH64_MOVW_G2_S:
6502 /* NOTE: We can only come here with movz or movn. */
6503 if (signed_overflow (value, scale + 16))
6504 as_bad_where (fixP->fx_file, fixP->fx_line,
6505 _("signed value out of range"));
6506 if (value < 0)
6507 {
6508 /* Force use of MOVN. */
6509 value = ~value;
6510 insn = reencode_movzn_to_movn (insn);
6511 }
6512 else
6513 {
6514 /* Force use of MOVZ. */
6515 insn = reencode_movzn_to_movz (insn);
6516 }
6517 break;
6518 default:
6519 /* Unchecked relocations. */
6520 break;
6521 }
6522 value >>= scale;
6523 }
6524
6525 /* Insert value into MOVN/MOVZ/MOVK instruction. */
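/* Illustrative (hypothetical value): for a resolved (fx_done)
   BFD_RELOC_AARCH64_MOVW_G1 fixup with value 0x12345678, SCALE is 16, so the
   shift above leaves 0x1234 to be placed in the imm16 field here.  */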
6526 insn |= encode_movw_imm (value & 0xffff);
6527
6528 put_aarch64_insn (buf, insn);
6529 }
6530 break;
6531
6532 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6533 fixP->fx_r_type = (ilp32_p
6534 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6535 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6536 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6537 /* Should always be exported to object file, see
6538 aarch64_force_relocation(). */
6539 gas_assert (!fixP->fx_done);
6540 gas_assert (seg->use_rela_p);
6541 break;
6542
6543 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6544 fixP->fx_r_type = (ilp32_p
6545 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6546 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6547 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6548 /* Should always be exported to object file, see
6549 aarch64_force_relocation(). */
6550 gas_assert (!fixP->fx_done);
6551 gas_assert (seg->use_rela_p);
6552 break;
6553
6554 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6555 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6556 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6557 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6558 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6559 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6560 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6561 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6562 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6563 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6564 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6565 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6566 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6567 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6568 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6569 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6570 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6571 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6572 /* Should always be exported to object file, see
6573 aarch64_force_relocation(). */
6574 gas_assert (!fixP->fx_done);
6575 gas_assert (seg->use_rela_p);
6576 break;
6577
6578 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6579 /* Should always be exported to object file, see
6580 aarch64_force_relocation(). */
6581 fixP->fx_r_type = (ilp32_p
6582 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6583 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6584 gas_assert (!fixP->fx_done);
6585 gas_assert (seg->use_rela_p);
6586 break;
6587
6588 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6589 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6590 case BFD_RELOC_AARCH64_ADD_LO12:
6591 case BFD_RELOC_AARCH64_LDST8_LO12:
6592 case BFD_RELOC_AARCH64_LDST16_LO12:
6593 case BFD_RELOC_AARCH64_LDST32_LO12:
6594 case BFD_RELOC_AARCH64_LDST64_LO12:
6595 case BFD_RELOC_AARCH64_LDST128_LO12:
6596 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6597 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6598 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6599 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6600 /* Should always be exported to object file, see
6601 aarch64_force_relocation(). */
6602 gas_assert (!fixP->fx_done);
6603 gas_assert (seg->use_rela_p);
6604 break;
6605
6606 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6607 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6608 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6609 break;
6610
6611 default:
6612 as_bad_where (fixP->fx_file, fixP->fx_line,
6613 _("unexpected %s fixup"),
6614 bfd_get_reloc_code_name (fixP->fx_r_type));
6615 break;
6616 }
6617
6618 apply_fix_return:
6619 /* Free the allocated struct aarch64_inst.
6620 N.B. currently only a very limited number of fix-up types actually use
6621 this field, so the impact on performance should be minimal. */
6622 if (fixP->tc_fix_data.inst != NULL)
6623 free (fixP->tc_fix_data.inst);
6624
6625 return;
6626 }
6627
6628 /* Translate internal representation of relocation info to BFD target
6629 format. */
6630
6631 arelent *
6632 tc_gen_reloc (asection * section, fixS * fixp)
6633 {
6634 arelent *reloc;
6635 bfd_reloc_code_real_type code;
6636
6637 reloc = xmalloc (sizeof (arelent));
6638
6639 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6640 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6641 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6642
6643 if (fixp->fx_pcrel)
6644 {
6645 if (section->use_rela_p)
6646 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6647 else
6648 fixp->fx_offset = reloc->address;
6649 }
6650 reloc->addend = fixp->fx_offset;
6651
6652 code = fixp->fx_r_type;
6653 switch (code)
6654 {
6655 case BFD_RELOC_16:
6656 if (fixp->fx_pcrel)
6657 code = BFD_RELOC_16_PCREL;
6658 break;
6659
6660 case BFD_RELOC_32:
6661 if (fixp->fx_pcrel)
6662 code = BFD_RELOC_32_PCREL;
6663 break;
6664
6665 case BFD_RELOC_64:
6666 if (fixp->fx_pcrel)
6667 code = BFD_RELOC_64_PCREL;
6668 break;
6669
6670 default:
6671 break;
6672 }
6673
6674 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6675 if (reloc->howto == NULL)
6676 {
6677 as_bad_where (fixp->fx_file, fixp->fx_line,
6678 _
6679 ("cannot represent %s relocation in this object file format"),
6680 bfd_get_reloc_code_name (code));
6681 return NULL;
6682 }
6683
6684 return reloc;
6685 }
6686
6687 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6688
6689 void
6690 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6691 {
6692 bfd_reloc_code_real_type type;
6693 int pcrel = 0;
6694
6695 /* Pick a reloc.
6696 FIXME: @@ Should look at CPU word size. */
6697 switch (size)
6698 {
6699 case 1:
6700 type = BFD_RELOC_8;
6701 break;
6702 case 2:
6703 type = BFD_RELOC_16;
6704 break;
6705 case 4:
6706 type = BFD_RELOC_32;
6707 break;
6708 case 8:
6709 type = BFD_RELOC_64;
6710 break;
6711 default:
6712 as_bad (_("cannot do %u-byte relocation"), size);
6713 type = BFD_RELOC_UNUSED;
6714 break;
6715 }
6716
6717 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6718 }
6719
6720 int
6721 aarch64_force_relocation (struct fix *fixp)
6722 {
6723 switch (fixp->fx_r_type)
6724 {
6725 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6726 /* Perform these "immediate" internal relocations
6727 even if the symbol is extern or weak. */
6728 return 0;
6729
6730 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6731 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6732 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6733 /* Pseudo relocs that need to be fixed up according to
6734 ilp32_p. */
6735 return 0;
6736
6737 case BFD_RELOC_AARCH64_ADD_LO12:
6738 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6739 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6740 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6741 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6742 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6743 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6744 case BFD_RELOC_AARCH64_LDST128_LO12:
6745 case BFD_RELOC_AARCH64_LDST16_LO12:
6746 case BFD_RELOC_AARCH64_LDST32_LO12:
6747 case BFD_RELOC_AARCH64_LDST64_LO12:
6748 case BFD_RELOC_AARCH64_LDST8_LO12:
6749 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6750 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6751 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6752 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6753 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6754 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6755 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6756 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6757 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6758 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6759 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6760 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6761 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6762 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6763 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6764 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6765 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6766 /* Always leave these relocations for the linker. */
6767 return 1;
6768
6769 default:
6770 break;
6771 }
6772
6773 return generic_force_reloc (fixp);
6774 }
6775
6776 #ifdef OBJ_ELF
6777
6778 const char *
6779 elf64_aarch64_target_format (void)
6780 {
6781 if (target_big_endian)
6782 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6783 else
6784 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6785 }
6786
6787 void
6788 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6789 {
6790 elf_frob_symbol (symp, puntp);
6791 }
6792 #endif
6793
6794 /* MD interface: Finalization. */
6795
6796 /* A good place to do this, although this was probably not intended
6797 for this kind of use. We need to dump the literal pool before
6798 references are made to a null symbol pointer. */
6799
6800 void
6801 aarch64_cleanup (void)
6802 {
6803 literal_pool *pool;
6804
6805 for (pool = list_of_pools; pool; pool = pool->next)
6806 {
6807 /* Put it at the end of the relevant section. */
6808 subseg_set (pool->section, pool->sub_section);
6809 s_ltorg (0);
6810 }
6811 }
6812
6813 #ifdef OBJ_ELF
6814 /* Remove any excess mapping symbols generated for alignment frags in
6815 SEC. We may have created a mapping symbol before a zero byte
6816 alignment; remove it if there's a mapping symbol after the
6817 alignment. */
6818 static void
6819 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6820 void *dummy ATTRIBUTE_UNUSED)
6821 {
6822 segment_info_type *seginfo = seg_info (sec);
6823 fragS *fragp;
6824
6825 if (seginfo == NULL || seginfo->frchainP == NULL)
6826 return;
6827
6828 for (fragp = seginfo->frchainP->frch_root;
6829 fragp != NULL; fragp = fragp->fr_next)
6830 {
6831 symbolS *sym = fragp->tc_frag_data.last_map;
6832 fragS *next = fragp->fr_next;
6833
6834 /* Variable-sized frags have been converted to fixed size by
6835 this point. But if this was variable-sized to start with,
6836 there will be a fixed-size frag after it. So don't handle
6837 next == NULL. */
6838 if (sym == NULL || next == NULL)
6839 continue;
6840
6841 if (S_GET_VALUE (sym) < next->fr_address)
6842 /* Not at the end of this frag. */
6843 continue;
6844 know (S_GET_VALUE (sym) == next->fr_address);
6845
6846 do
6847 {
6848 if (next->tc_frag_data.first_map != NULL)
6849 {
6850 /* Next frag starts with a mapping symbol. Discard this
6851 one. */
6852 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6853 break;
6854 }
6855
6856 if (next->fr_next == NULL)
6857 {
6858 /* This mapping symbol is at the end of the section. Discard
6859 it. */
6860 know (next->fr_fix == 0 && next->fr_var == 0);
6861 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6862 break;
6863 }
6864
6865 /* As long as we have empty frags without any mapping symbols,
6866 keep looking. */
6867 /* If the next frag is non-empty and does not start with a
6868 mapping symbol, then this mapping symbol is required. */
6869 if (next->fr_address != next->fr_next->fr_address)
6870 break;
6871
6872 next = next->fr_next;
6873 }
6874 while (next != NULL);
6875 }
6876 }
6877 #endif
6878
6879 /* Adjust the symbol table. */
6880
6881 void
6882 aarch64_adjust_symtab (void)
6883 {
6884 #ifdef OBJ_ELF
6885 /* Remove any overlapping mapping symbols generated by alignment frags. */
6886 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6887 /* Now do generic ELF adjustments. */
6888 elf_adjust_symtab ();
6889 #endif
6890 }
6891
6892 static void
6893 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6894 {
6895 const char *hash_err;
6896
6897 hash_err = hash_insert (table, key, value);
6898 if (hash_err)
6899 printf ("Internal Error: Can't hash %s\n", key);
6900 }
6901
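/* Populate the opcode hash table from aarch64_opcode_table. Opcodes that
   share a mnemonic (e.g. the several operand forms of "add") are chained
   together as a list of templates under a single hash entry.  */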
6902 static void
6903 fill_instruction_hash_table (void)
6904 {
6905 aarch64_opcode *opcode = aarch64_opcode_table;
6906
6907 while (opcode->name != NULL)
6908 {
6909 templates *templ, *new_templ;
6910 templ = hash_find (aarch64_ops_hsh, opcode->name);
6911
6912 new_templ = (templates *) xmalloc (sizeof (templates));
6913 new_templ->opcode = opcode;
6914 new_templ->next = NULL;
6915
6916 if (!templ)
6917 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6918 else
6919 {
6920 new_templ->next = templ->next;
6921 templ->next = new_templ;
6922 }
6923 ++opcode;
6924 }
6925 }
6926
6927 static inline void
6928 convert_to_upper (char *dst, const char *src, size_t num)
6929 {
6930 unsigned int i;
6931 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6932 *dst = TOUPPER (*src);
6933 *dst = '\0';
6934 }
6935
6936 /* Assume STR points to a lower-case string; allocate, convert and return
6937 the corresponding upper-case string. */
6938 static inline const char*
6939 get_upper_str (const char *str)
6940 {
6941 char *ret;
6942 size_t len = strlen (str);
6943 if ((ret = xmalloc (len + 1)) == NULL)
6944 abort ();
6945 convert_to_upper (ret, str, len);
6946 return ret;
6947 }
6948
6949 /* MD interface: Initialization. */
6950
6951 void
6952 md_begin (void)
6953 {
6954 unsigned mach;
6955 unsigned int i;
6956
6957 if ((aarch64_ops_hsh = hash_new ()) == NULL
6958 || (aarch64_cond_hsh = hash_new ()) == NULL
6959 || (aarch64_shift_hsh = hash_new ()) == NULL
6960 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6961 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6962 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6963 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6964 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6965 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6966 || (aarch64_reg_hsh = hash_new ()) == NULL
6967 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6968 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6969 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6970 as_fatal (_("virtual memory exhausted"));
6971
6972 fill_instruction_hash_table ();
6973
6974 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6975 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6976 (void *) (aarch64_sys_regs + i));
6977
6978 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6979 checked_hash_insert (aarch64_pstatefield_hsh,
6980 aarch64_pstatefields[i].name,
6981 (void *) (aarch64_pstatefields + i));
6982
6983 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6984 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6985 aarch64_sys_regs_ic[i].template,
6986 (void *) (aarch64_sys_regs_ic + i));
6987
6988 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6989 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6990 aarch64_sys_regs_dc[i].template,
6991 (void *) (aarch64_sys_regs_dc + i));
6992
6993 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6994 checked_hash_insert (aarch64_sys_regs_at_hsh,
6995 aarch64_sys_regs_at[i].template,
6996 (void *) (aarch64_sys_regs_at + i));
6997
6998 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6999 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7000 aarch64_sys_regs_tlbi[i].template,
7001 (void *) (aarch64_sys_regs_tlbi + i));
7002
7003 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7004 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7005 (void *) (reg_names + i));
7006
7007 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7008 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7009 (void *) (nzcv_names + i));
7010
7011 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7012 {
7013 const char *name = aarch64_operand_modifiers[i].name;
7014 checked_hash_insert (aarch64_shift_hsh, name,
7015 (void *) (aarch64_operand_modifiers + i));
7016 /* Also hash the name in the upper case. */
7017 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7018 (void *) (aarch64_operand_modifiers + i));
7019 }
7020
7021 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7022 {
7023 unsigned int j;
7024 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7025 the same condition code. */
7026 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7027 {
7028 const char *name = aarch64_conds[i].names[j];
7029 if (name == NULL)
7030 break;
7031 checked_hash_insert (aarch64_cond_hsh, name,
7032 (void *) (aarch64_conds + i));
7033 /* Also hash the name in the upper case. */
7034 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7035 (void *) (aarch64_conds + i));
7036 }
7037 }
7038
7039 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7040 {
7041 const char *name = aarch64_barrier_options[i].name;
7042 /* Skip xx00 - the unallocated barrier option values. */
7043 if ((i & 0x3) == 0)
7044 continue;
7045 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7046 (void *) (aarch64_barrier_options + i));
7047 /* Also hash the name in the upper case. */
7048 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7049 (void *) (aarch64_barrier_options + i));
7050 }
7051
7052 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7053 {
7054 const char* name = aarch64_prfops[i].name;
7055 /* Skip the unallocated hint encodings. */
7056 if (name == NULL)
7057 continue;
7058 checked_hash_insert (aarch64_pldop_hsh, name,
7059 (void *) (aarch64_prfops + i));
7060 /* Also hash the name in the upper case. */
7061 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7062 (void *) (aarch64_prfops + i));
7063 }
7064
7065 /* Set the cpu variant based on the command-line options. */
7066 if (!mcpu_cpu_opt)
7067 mcpu_cpu_opt = march_cpu_opt;
7068
7069 if (!mcpu_cpu_opt)
7070 mcpu_cpu_opt = &cpu_default;
7071
7072 cpu_variant = *mcpu_cpu_opt;
7073
7074 /* Record the CPU type. */
7075 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7076
7077 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7078 }
7079
7080 /* Command line processing. */
7081
7082 const char *md_shortopts = "m:";
7083
7084 #ifdef AARCH64_BI_ENDIAN
7085 #define OPTION_EB (OPTION_MD_BASE + 0)
7086 #define OPTION_EL (OPTION_MD_BASE + 1)
7087 #else
7088 #if TARGET_BYTES_BIG_ENDIAN
7089 #define OPTION_EB (OPTION_MD_BASE + 0)
7090 #else
7091 #define OPTION_EL (OPTION_MD_BASE + 1)
7092 #endif
7093 #endif
7094
7095 struct option md_longopts[] = {
7096 #ifdef OPTION_EB
7097 {"EB", no_argument, NULL, OPTION_EB},
7098 #endif
7099 #ifdef OPTION_EL
7100 {"EL", no_argument, NULL, OPTION_EL},
7101 #endif
7102 {NULL, no_argument, NULL, 0}
7103 };
7104
7105 size_t md_longopts_size = sizeof (md_longopts);
7106
7107 struct aarch64_option_table
7108 {
7109 char *option; /* Option name to match. */
7110 char *help; /* Help information. */
7111 int *var; /* Variable to change. */
7112 int value; /* What to change it to. */
7113 char *deprecated; /* If non-null, print this message. */
7114 };
7115
7116 static struct aarch64_option_table aarch64_opts[] = {
7117 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7118 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7119 NULL},
7120 #ifdef DEBUG_AARCH64
7121 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7122 #endif /* DEBUG_AARCH64 */
7123 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7124 NULL},
7125 {NULL, NULL, NULL, 0, NULL}
7126 };
7127
7128 struct aarch64_cpu_option_table
7129 {
7130 char *name;
7131 const aarch64_feature_set value;
7132 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7133 case. */
7134 const char *canonical_name;
7135 };
7136
7137 /* This list should, at a minimum, contain all the cpu names
7138 recognized by GCC. */
7139 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7140 {"all", AARCH64_ANY, NULL},
7141 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7142 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7143 {"generic", AARCH64_ARCH_V8, NULL},
7144
7145 /* These two are example CPUs supported in GCC; once we have real
7146 CPUs they will be removed. */
7147 {"example-1", AARCH64_ARCH_V8, NULL},
7148 {"example-2", AARCH64_ARCH_V8, NULL},
7149
7150 {NULL, AARCH64_ARCH_NONE, NULL}
7151 };
7152
7153 struct aarch64_arch_option_table
7154 {
7155 char *name;
7156 const aarch64_feature_set value;
7157 };
7158
7159 /* This list should, at a minimum, contain all the architecture names
7160 recognized by GCC. */
7161 static const struct aarch64_arch_option_table aarch64_archs[] = {
7162 {"all", AARCH64_ANY},
7163 {"armv8-a", AARCH64_ARCH_V8},
7164 {NULL, AARCH64_ARCH_NONE}
7165 };
7166
7167 /* ISA extensions. */
7168 struct aarch64_option_cpu_value_table
7169 {
7170 char *name;
7171 const aarch64_feature_set value;
7172 };
7173
7174 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7175 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7176 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7177 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7178 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7179 {NULL, AARCH64_ARCH_NONE}
7180 };
7181
7182 struct aarch64_long_option_table
7183 {
7184 char *option; /* Substring to match. */
7185 char *help; /* Help information. */
7186 int (*func) (char *subopt); /* Function to decode sub-option. */
7187 char *deprecated; /* If non-null, print this message. */
7188 };
7189
7190 static int
7191 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7192 {
7193 /* We insist on extensions being added before being removed. We achieve
7194 this by using the ADDING_VALUE variable to indicate whether we are
7195 adding an extension (1) or removing it (0) and only allowing it to
7196 change in the order -1 -> 1 -> 0. */
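/* For example (hypothetical option string): "+crypto+nofp" is accepted
   because the addition comes first, whereas "+nofp+crypto" is rejected with
   "must specify extensions to add before specifying those to remove".  */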
7197 int adding_value = -1;
7198 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7199
7200 /* Copy the feature set, so that we can modify it. */
7201 *ext_set = **opt_p;
7202 *opt_p = ext_set;
7203
7204 while (str != NULL && *str != 0)
7205 {
7206 const struct aarch64_option_cpu_value_table *opt;
7207 char *ext;
7208 int optlen;
7209
7210 if (*str != '+')
7211 {
7212 as_bad (_("invalid architectural extension"));
7213 return 0;
7214 }
7215
7216 str++;
7217 ext = strchr (str, '+');
7218
7219 if (ext != NULL)
7220 optlen = ext - str;
7221 else
7222 optlen = strlen (str);
7223
7224 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7225 {
7226 if (adding_value != 0)
7227 adding_value = 0;
7228 optlen -= 2;
7229 str += 2;
7230 }
7231 else if (optlen > 0)
7232 {
7233 if (adding_value == -1)
7234 adding_value = 1;
7235 else if (adding_value != 1)
7236 {
7237 as_bad (_("must specify extensions to add before specifying "
7238 "those to remove"));
7239 return 0;
7240 }
7241 }
7242
7243 if (optlen == 0)
7244 {
7245 as_bad (_("missing architectural extension"));
7246 return 0;
7247 }
7248
7249 gas_assert (adding_value != -1);
7250
7251 for (opt = aarch64_features; opt->name != NULL; opt++)
7252 if (strncmp (opt->name, str, optlen) == 0)
7253 {
7254 /* Add or remove the extension. */
7255 if (adding_value)
7256 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7257 else
7258 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7259 break;
7260 }
7261
7262 if (opt->name == NULL)
7263 {
7264 as_bad (_("unknown architectural extension `%s'"), str);
7265 return 0;
7266 }
7267
7268 str = ext;
7269 }
7270
7271 return 1;
7272 }
7273
7274 static int
7275 aarch64_parse_cpu (char *str)
7276 {
7277 const struct aarch64_cpu_option_table *opt;
7278 char *ext = strchr (str, '+');
7279 size_t optlen;
7280
7281 if (ext != NULL)
7282 optlen = ext - str;
7283 else
7284 optlen = strlen (str);
7285
7286 if (optlen == 0)
7287 {
7288 as_bad (_("missing cpu name `%s'"), str);
7289 return 0;
7290 }
7291
7292 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7293 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7294 {
7295 mcpu_cpu_opt = &opt->value;
7296 if (ext != NULL)
7297 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7298
7299 return 1;
7300 }
7301
7302 as_bad (_("unknown cpu `%s'"), str);
7303 return 0;
7304 }
7305
7306 static int
7307 aarch64_parse_arch (char *str)
7308 {
7309 const struct aarch64_arch_option_table *opt;
7310 char *ext = strchr (str, '+');
7311 size_t optlen;
7312
7313 if (ext != NULL)
7314 optlen = ext - str;
7315 else
7316 optlen = strlen (str);
7317
7318 if (optlen == 0)
7319 {
7320 as_bad (_("missing architecture name `%s'"), str);
7321 return 0;
7322 }
7323
7324 for (opt = aarch64_archs; opt->name != NULL; opt++)
7325 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7326 {
7327 march_cpu_opt = &opt->value;
7328 if (ext != NULL)
7329 return aarch64_parse_features (ext, &march_cpu_opt);
7330
7331 return 1;
7332 }
7333
7334 as_bad (_("unknown architecture `%s'\n"), str);
7335 return 0;
7336 }
7337
7338 /* ABIs. */
7339 struct aarch64_option_abi_value_table
7340 {
7341 char *name;
7342 enum aarch64_abi_type value;
7343 };
7344
7345 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7346 {"ilp32", AARCH64_ABI_ILP32},
7347 {"lp64", AARCH64_ABI_LP64},
7348 {NULL, 0}
7349 };
7350
7351 static int
7352 aarch64_parse_abi (char *str)
7353 {
7354 const struct aarch64_option_abi_value_table *opt;
7355 size_t optlen = strlen (str);
7356
7357 if (optlen == 0)
7358 {
7359 as_bad (_("missing abi name `%s'"), str);
7360 return 0;
7361 }
7362
7363 for (opt = aarch64_abis; opt->name != NULL; opt++)
7364 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7365 {
7366 aarch64_abi = opt->value;
7367 return 1;
7368 }
7369
7370 as_bad (_("unknown abi `%s'\n"), str);
7371 return 0;
7372 }
7373
7374 static struct aarch64_long_option_table aarch64_long_opts[] = {
7375 #ifdef OBJ_ELF
7376 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7377 aarch64_parse_abi, NULL},
7378 #endif /* OBJ_ELF */
7379 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7380 aarch64_parse_cpu, NULL},
7381 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7382 aarch64_parse_arch, NULL},
7383 {NULL, NULL, 0, NULL}
7384 };
7385
7386 int
7387 md_parse_option (int c, char *arg)
7388 {
7389 struct aarch64_option_table *opt;
7390 struct aarch64_long_option_table *lopt;
7391
7392 switch (c)
7393 {
7394 #ifdef OPTION_EB
7395 case OPTION_EB:
7396 target_big_endian = 1;
7397 break;
7398 #endif
7399
7400 #ifdef OPTION_EL
7401 case OPTION_EL:
7402 target_big_endian = 0;
7403 break;
7404 #endif
7405
7406 case 'a':
7407 /* Listing option. Just ignore these, we don't support additional
7408 ones. */
7409 return 0;
7410
7411 default:
7412 for (opt = aarch64_opts; opt->option != NULL; opt++)
7413 {
7414 if (c == opt->option[0]
7415 && ((arg == NULL && opt->option[1] == 0)
7416 || streq (arg, opt->option + 1)))
7417 {
7418 /* If the option is deprecated, tell the user. */
7419 if (opt->deprecated != NULL)
7420 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7421 arg ? arg : "", _(opt->deprecated));
7422
7423 if (opt->var != NULL)
7424 *opt->var = opt->value;
7425
7426 return 1;
7427 }
7428 }
7429
7430 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7431 {
7432 /* These options are expected to have an argument. */
7433 if (c == lopt->option[0]
7434 && arg != NULL
7435 && strncmp (arg, lopt->option + 1,
7436 strlen (lopt->option + 1)) == 0)
7437 {
7438 /* If the option is deprecated, tell the user. */
7439 if (lopt->deprecated != NULL)
7440 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7441 _(lopt->deprecated));
7442
7443 /* Call the sub-option parser. */
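/* For example (hypothetical invocation): with "-mcpu=cortex-a53", ARG is
   "cpu=cortex-a53" and LOPT->option is "mcpu=", so the sub-option parser is
   handed the string "cortex-a53".  */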
7444 return lopt->func (arg + strlen (lopt->option) - 1);
7445 }
7446 }
7447
7448 return 0;
7449 }
7450
7451 return 1;
7452 }
7453
7454 void
7455 md_show_usage (FILE * fp)
7456 {
7457 struct aarch64_option_table *opt;
7458 struct aarch64_long_option_table *lopt;
7459
7460 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7461
7462 for (opt = aarch64_opts; opt->option != NULL; opt++)
7463 if (opt->help != NULL)
7464 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7465
7466 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7467 if (lopt->help != NULL)
7468 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7469
7470 #ifdef OPTION_EB
7471 fprintf (fp, _("\
7472 -EB assemble code for a big-endian cpu\n"));
7473 #endif
7474
7475 #ifdef OPTION_EL
7476 fprintf (fp, _("\
7477 -EL assemble code for a little-endian cpu\n"));
7478 #endif
7479 }
7480
7481 /* Parse a .cpu directive. */
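/* For example (hypothetical source line):  .cpu cortex-a57+crc
   (the .arch directive below is handled in the same way, e.g. .arch armv8-a+crypto).  */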
7482
7483 static void
7484 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7485 {
7486 const struct aarch64_cpu_option_table *opt;
7487 char saved_char;
7488 char *name;
7489 char *ext;
7490 size_t optlen;
7491
7492 name = input_line_pointer;
7493 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7494 input_line_pointer++;
7495 saved_char = *input_line_pointer;
7496 *input_line_pointer = 0;
7497
7498 ext = strchr (name, '+');
7499
7500 if (ext != NULL)
7501 optlen = ext - name;
7502 else
7503 optlen = strlen (name);
7504
7505 /* Skip the first "all" entry. */
7506 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7507 if (strlen (opt->name) == optlen
7508 && strncmp (name, opt->name, optlen) == 0)
7509 {
7510 mcpu_cpu_opt = &opt->value;
7511 if (ext != NULL)
7512 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7513 return;
7514
7515 cpu_variant = *mcpu_cpu_opt;
7516
7517 *input_line_pointer = saved_char;
7518 demand_empty_rest_of_line ();
7519 return;
7520 }
7521 as_bad (_("unknown cpu `%s'"), name);
7522 *input_line_pointer = saved_char;
7523 ignore_rest_of_line ();
7524 }
7525
7526
7527 /* Parse a .arch directive. */
7528
7529 static void
7530 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7531 {
7532 const struct aarch64_arch_option_table *opt;
7533 char saved_char;
7534 char *name;
7535 char *ext;
7536 size_t optlen;
7537
7538 name = input_line_pointer;
7539 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7540 input_line_pointer++;
7541 saved_char = *input_line_pointer;
7542 *input_line_pointer = 0;
7543
7544 ext = strchr (name, '+');
7545
7546 if (ext != NULL)
7547 optlen = ext - name;
7548 else
7549 optlen = strlen (name);
7550
7551 /* Skip the first "all" entry. */
7552 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7553 if (strlen (opt->name) == optlen
7554 && strncmp (name, opt->name, optlen) == 0)
7555 {
7556 mcpu_cpu_opt = &opt->value;
7557 if (ext != NULL)
7558 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7559 return;
7560
7561 cpu_variant = *mcpu_cpu_opt;
7562
7563 *input_line_pointer = saved_char;
7564 demand_empty_rest_of_line ();
7565 return;
7566 }
7567
7568 as_bad (_("unknown architecture `%s'\n"), name);
7569 *input_line_pointer = saved_char;
7570 ignore_rest_of_line ();
7571 }
7572
7573 /* Copy symbol information. */
7574
7575 void
7576 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7577 {
7578 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7579 }