8d5cc5194de17fd8ed2b6ea3e38ce4810ecefed3
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
/* A parsed vector arrangement or vector-element reference, e.g. the
   ".4s" in "v0.4s" or the ".s[2]" in "v0.s[2]".  */
struct vector_type_el
{
  /* Element type, or NT_invtype if none was recognized.  */
  enum vector_el_type type;
  /* Mask of NTA_HAS* bits saying which pieces of information below
     were present in the source.  */
  unsigned char defined;
  /* Element size when NTA_HASVARWIDTH is set — NOTE(review): units
     (bits vs bytes) not visible in this chunk; confirm in the parsers
     that fill it in.  */
  unsigned element_size;
  /* Number of elements in the arrangement (e.g. 4 for ".4s").  */
  unsigned width;
  /* Element index when NTA_HASINDEX is set.  */
  int64_t index;
};
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
/* Relocation/fixup information attached to the instruction being
   assembled.  */
struct reloc
{
  /* BFD relocation code.  */
  bfd_reloc_code_real_type type;
  /* The expression the relocation applies to.  */
  expressionS exp;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* The operand the relocation is against.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags, e.g. FIXUP_F_HAS_EXPLICIT_SHIFT.  */
  uint32_t flags;
  /* NOTE(review): presumably set when libopcodes is needed to finish
     encoding the fixed-up instruction — confirm at the use sites.  */
  unsigned need_libopcodes_p : 1;
};
142
/* GAS's view of the instruction currently being assembled: the
   libopcodes representation plus GAS-specific parsing and fixup
   state.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
165 data fields contain the following information:
166
167 data[0].i:
168 A mask of register types that would have been acceptable as bare
169 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
170 is set if a general parsing error occurred for an operand (that is,
171 an error not related to registers, and having no error string).
172
173 data[1].i:
174 A mask of register types that would have been acceptable inside
175 a register list. In addition, SEF_IN_REGLIST is set if the
176 operand contained a '{' and if we got to the point of trying
177 to parse a register inside a list.
178
179 data[2].i:
180 The mask associated with the register that was actually seen, or 0
181 if none. A nonzero value describes a register inside a register
182 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
183 register.
184
185 The idea is that stringless errors from multiple opcode templates can
186 be ORed together to give a summary of the available alternatives. */
187 #define SEF_DEFAULT_ERROR (1U << 31)
188 #define SEF_IN_REGLIST (1U << 31)
189
190 /* Diagnostics inline function utilities.
191
192 These are lightweight utilities which should only be called by parse_operands
193 and other parsers. GAS processes each assembly line by parsing it against
194 instruction template(s), in the case of multiple templates (for the same
195 mnemonic name), those templates are tried one by one until one succeeds or
196 all fail. An assembly line may fail a few templates before being
197 successfully parsed; an error saved here in most cases is not a user error
198 but an error indicating the current template is not the right template.
199 Therefore it is very important that errors can be saved at a low cost during
200 the parsing; we don't want to slow down the whole parsing by recording
201 non-user errors in detail.
202
203 Remember that the objective is to help GAS pick up the most appropriate
204 error message in the case of multiple templates, e.g. FMOV which has 8
205 templates. */
206
207 static inline void
208 clear_error (void)
209 {
210 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
211 inst.parsing_error.kind = AARCH64_OPDE_NIL;
212 }
213
214 static inline bool
215 error_p (void)
216 {
217 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
218 }
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
224 inst.parsing_error.index = -1;
225 inst.parsing_error.kind = kind;
226 inst.parsing_error.error = error;
227 }
228
/* Record a recoverable error with the given message.  NOTE(review):
   "recoverable" semantics (parsing may continue past the operand) are
   inferred from the kind name — confirm against aarch64-opc.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
234
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* Mark this as a generic stringless failure so that errors from
     several templates can be ORed together; see the comment above
     SEF_DEFAULT_ERROR.  */
  inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
}
243
/* Record a stringless syntax error whose data[0].i is FLAGS: a mask of
   acceptable register types, possibly with SEF_DEFAULT_ERROR set (see
   the comment above SEF_DEFAULT_ERROR).  */
static inline void
set_expected_error (unsigned int flags)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  inst.parsing_error.data[0].i = flags;
}
250
/* Record a syntax error with the given message, overwriting any
   previously recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
256
257 static inline void
258 set_first_syntax_error (const char *error)
259 {
260 if (! error_p ())
261 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
262 }
263
/* Record a fatal syntax error with the given message.  NOTE(review):
   presumably fatal errors abort matching against further templates —
   confirm against the template-matching loop.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
269 \f
270 /* Return value for certain parsers when the parsing fails; those parsers
271 return the information of the parsed result, e.g. register number, on
272 success. */
273 #define PARSE_FAIL -1
274
275 /* This is an invalid condition code that means no conditional field is
276 present. */
277 #define COND_ALWAYS 0x10
278
/* Maps the textual spelling of an NZCV flags operand ("template")
   onto its immediate encoding ("value").  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;
284
/* Maps a relocation-specifier name onto its BFD relocation code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
290
291 /* Macros to define the register types and masks for the purpose
292 of parsing. */
293
294 #undef AARCH64_REG_TYPES
295 #define AARCH64_REG_TYPES \
296 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
297 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
298 BASIC_REG_TYPE(SP_32) /* wsp */ \
299 BASIC_REG_TYPE(SP_64) /* sp */ \
300 BASIC_REG_TYPE(ZR_32) /* wzr */ \
301 BASIC_REG_TYPE(ZR_64) /* xzr */ \
302 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
303 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
304 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
305 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
306 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
307 BASIC_REG_TYPE(V) /* v[0-31] */ \
308 BASIC_REG_TYPE(Z) /* z[0-31] */ \
309 BASIC_REG_TYPE(P) /* p[0-15] */ \
310 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
311 BASIC_REG_TYPE(ZA) /* za */ \
312 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
313 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
314 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
315 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
316 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
317 /* Typecheck: same, plus SVE registers. */ \
318 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z)) \
320 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
321 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
322 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
323 /* Typecheck: same, plus SVE registers. */ \
324 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
325 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
326 | REG_TYPE(Z)) \
327 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
328 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
329 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
330 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
331 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
332 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
333 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
334 /* Typecheck: any [BHSDQ]P FP. */ \
335 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
336 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
337 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
338 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
339 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
340 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
341 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
342 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
343 be used for SVE instructions, since Zn and Pn are valid symbols \
344 in other contexts. */ \
345 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
346 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
347 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
348 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
349 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
350 | REG_TYPE(Z) | REG_TYPE(P)) \
351 /* Any integer register; used for error messages only. */ \
352 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
353 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
354 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
355 /* Any vector register. */ \
356 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
357 /* An SVE vector or predicate register. */ \
358 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
359 /* Any vector or predicate register. */ \
360 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
361 /* The whole of ZA or a single tile. */ \
362 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
363 /* A horizontal or vertical slice of a ZA tile. */ \
364 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
365 /* Pseudo type to mark the end of the enumerator sequence. */ \
366 END_REG_TYPE(MAX)
367
368 #undef BASIC_REG_TYPE
369 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
370 #undef MULTI_REG_TYPE
371 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
372 #undef END_REG_TYPE
373 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
374
375 /* Register type enumerators. */
376 typedef enum aarch64_reg_type_
377 {
378 /* A list of REG_TYPE_*. */
379 AARCH64_REG_TYPES
380 } aarch64_reg_type;
381
382 #undef BASIC_REG_TYPE
383 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
384 #undef REG_TYPE
385 #define REG_TYPE(T) (1 << REG_TYPE_##T)
386 #undef MULTI_REG_TYPE
387 #define MULTI_REG_TYPE(T,V) V,
388 #undef END_REG_TYPE
389 #define END_REG_TYPE(T) 0
390
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as written in assembly (the hash key).  */
  const char *name;
  /* Register number within its register file.  */
  unsigned char number;
  /* An aarch64_reg_type value; 8 bits is wide enough for all
     enumerators.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably nonzero for registers predefined by GAS
     and zero for user-defined aliases — confirm against the code that
     populates aarch64_reg_hsh.  */
  unsigned char builtin;
} reg_entry;
399
400 /* Values indexed by aarch64_reg_type to assist the type checking. */
401 static const unsigned reg_type_masks[] =
402 {
403 AARCH64_REG_TYPES
404 };
405
406 #undef BASIC_REG_TYPE
407 #undef REG_TYPE
408 #undef MULTI_REG_TYPE
409 #undef END_REG_TYPE
410 #undef AARCH64_REG_TYPES
411
412 /* We expected one of the registers in MASK to be specified. If a register
413 of some kind was specified, SEEN is a mask that contains that register,
414 otherwise it is zero.
415
416 If it is possible to provide a relatively pithy message that describes
417 the error exactly, return a string that does so, reporting the error
418 against "operand %d". Return null otherwise.
419
420 From a QoI perspective, any REG_TYPE_* that is passed as the first
421 argument to set_expected_reg_error should generally have its own message.
422 Providing messages for combinations of such REG_TYPE_*s can be useful if
423 it is possible to summarize the combination in a relatively natural way.
424 On the other hand, it seems better to avoid long lists of unrelated
425 things. */
426
427 static const char *
428 get_reg_expected_msg (unsigned int mask, unsigned int seen)
429 {
430 /* First handle messages that use SEEN. */
431 if ((mask & reg_type_masks[REG_TYPE_ZAT])
432 && (seen & reg_type_masks[REG_TYPE_ZATHV]))
433 return N_("expected an unsuffixed ZA tile at operand %d");
434
435 if ((mask & reg_type_masks[REG_TYPE_ZATHV])
436 && (seen & reg_type_masks[REG_TYPE_ZAT]))
437 return N_("missing horizontal or vertical suffix at operand %d");
438
439 if ((mask & reg_type_masks[REG_TYPE_ZA])
440 && (seen & (reg_type_masks[REG_TYPE_ZAT]
441 | reg_type_masks[REG_TYPE_ZATHV])))
442 return N_("expected 'za' rather than a ZA tile at operand %d");
443
444 if ((mask & reg_type_masks[REG_TYPE_PN])
445 && (seen & reg_type_masks[REG_TYPE_P]))
446 return N_("expected a predicate-as-counter rather than predicate-as-mask"
447 " register at operand %d");
448
449 if ((mask & reg_type_masks[REG_TYPE_P])
450 && (seen & reg_type_masks[REG_TYPE_PN]))
451 return N_("expected a predicate-as-mask rather than predicate-as-counter"
452 " register at operand %d");
453
454 /* Integer, zero and stack registers. */
455 if (mask == reg_type_masks[REG_TYPE_R_64])
456 return N_("expected a 64-bit integer register at operand %d");
457 if (mask == reg_type_masks[REG_TYPE_R_ZR])
458 return N_("expected an integer or zero register at operand %d");
459 if (mask == reg_type_masks[REG_TYPE_R_SP])
460 return N_("expected an integer or stack pointer register at operand %d");
461
462 /* Floating-point and SIMD registers. */
463 if (mask == reg_type_masks[REG_TYPE_BHSDQ])
464 return N_("expected a scalar SIMD or floating-point register"
465 " at operand %d");
466 if (mask == reg_type_masks[REG_TYPE_V])
467 return N_("expected an Advanced SIMD vector register at operand %d");
468 if (mask == reg_type_masks[REG_TYPE_Z])
469 return N_("expected an SVE vector register at operand %d");
470 if (mask == reg_type_masks[REG_TYPE_P]
471 || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
472 /* Use this error for "predicate-as-mask only" and "either kind of
473 predicate". We report a more specific error if P is used where
474 PN is expected, and vice versa, so the issue at this point is
475 "predicate-like" vs. "not predicate-like". */
476 return N_("expected an SVE predicate register at operand %d");
477 if (mask == reg_type_masks[REG_TYPE_VZ])
478 return N_("expected a vector register at operand %d");
479 if (mask == reg_type_masks[REG_TYPE_ZP])
480 return N_("expected an SVE vector or predicate register at operand %d");
481 if (mask == reg_type_masks[REG_TYPE_VZP])
482 return N_("expected a vector or predicate register at operand %d");
483
484 /* ZA-related registers. */
485 if (mask == reg_type_masks[REG_TYPE_ZA])
486 return N_("expected a ZA array vector at operand %d");
487 if (mask == reg_type_masks[REG_TYPE_ZA_ZAT])
488 return N_("expected 'za' or a ZA tile at operand %d");
489 if (mask == reg_type_masks[REG_TYPE_ZAT])
490 return N_("expected a ZA tile at operand %d");
491 if (mask == reg_type_masks[REG_TYPE_ZATHV])
492 return N_("expected a ZA tile slice at operand %d");
493
494 /* Integer and vector combos. */
495 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
496 return N_("expected an integer register or Advanced SIMD vector register"
497 " at operand %d");
498 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
499 return N_("expected an integer register or SVE vector register"
500 " at operand %d");
501 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
502 return N_("expected an integer or vector register at operand %d");
503 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
504 return N_("expected an integer or predicate register at operand %d");
505 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
506 return N_("expected an integer, vector or predicate register"
507 " at operand %d");
508
509 /* SVE and SME combos. */
510 if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
511 return N_("expected an SVE vector register or ZA tile slice"
512 " at operand %d");
513
514 return NULL;
515 }
516
517 /* Record that we expected a register of type TYPE but didn't see one.
518 REG is the register that we actually saw, or null if we didn't see a
519 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
520 contents of a register list, otherwise it is zero. */
521
522 static inline void
523 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
524 unsigned int flags)
525 {
526 assert (flags == 0 || flags == SEF_IN_REGLIST);
527 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
528 if (flags & SEF_IN_REGLIST)
529 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
530 else
531 inst.parsing_error.data[0].i = reg_type_masks[type];
532 if (reg)
533 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
534 }
535
536 /* Record that we expected a register list containing registers of type TYPE,
537 but didn't see the opening '{'. If we saw a register instead, REG is the
538 register that we saw, otherwise it is null. */
539
/* Record that we expected a register list containing registers of type
   TYPE, but didn't see the opening '{'.  If we saw a register instead,
   REG is the register that we saw, otherwise it is null.  The masks go
   in data[1] (expected-in-list) and data[2] (actually seen), per the
   scheme described above SEF_DEFAULT_ERROR.  */
static inline void
set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  inst.parsing_error.data[1].i = reg_type_masks[type];
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
548
549 /* Some well known registers that we refer to directly elsewhere. */
550 #define REG_SP 31
551 #define REG_ZR 31
552
553 /* Instructions take 4 bytes in the object file. */
554 #define INSN_SIZE 4
555
556 static htab_t aarch64_ops_hsh;
557 static htab_t aarch64_cond_hsh;
558 static htab_t aarch64_shift_hsh;
559 static htab_t aarch64_sys_regs_hsh;
560 static htab_t aarch64_pstatefield_hsh;
561 static htab_t aarch64_sys_regs_ic_hsh;
562 static htab_t aarch64_sys_regs_dc_hsh;
563 static htab_t aarch64_sys_regs_at_hsh;
564 static htab_t aarch64_sys_regs_tlbi_hsh;
565 static htab_t aarch64_sys_regs_sr_hsh;
566 static htab_t aarch64_reg_hsh;
567 static htab_t aarch64_barrier_opt_hsh;
568 static htab_t aarch64_nzcv_hsh;
569 static htab_t aarch64_pldop_hsh;
570 static htab_t aarch64_hint_opt_hsh;
571
572 /* Stuff needed to resolve the label ambiguity
573 As:
574 ...
575 label: <insn>
576 may differ from:
577 ...
578 label:
579 <insn> */
580
581 static symbolS *last_label_seen;
582
583 /* Literal pool structure. Held on a per-section
584 and per-sub-section basis. */
585
586 #define MAX_LITERAL_POOL_SIZE 1024
/* One literal-pool entry: an expression plus, for O_big values, a
   private copy of the bignum digits (the global bignum buffer is
   reused by later parsing).  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
593
/* A per-(section, sub-section) pool of literal constants awaiting
   emission.  */
typedef struct literal_pool
{
  /* The constants collected so far.  */
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* NOTE(review): presumably a sequential id used to make the pool's
     symbol name unique — confirm where pools are created.  */
  unsigned int id;
  /* Label marking the pool's location once emitted.  */
  symbolS *symbol;
  /* Section and sub-section the pool belongs to.  */
  segT section;
  subsegT sub_section;
  /* NOTE(review): entry size in bytes (4 or 8) — confirm at use sites.  */
  int size;
  /* Next pool in the list_of_pools chain.  */
  struct literal_pool *next;
} literal_pool;
605
606 /* Pointer to a linked list of literal pools. */
607 static literal_pool *list_of_pools = NULL;
608 \f
609 /* Pure syntax. */
610
611 /* This array holds the chars that always start a comment. If the
612 pre-processor is disabled, these aren't very useful. */
613 const char comment_chars[] = "";
614
615 /* This array holds the chars that only start a comment at the beginning of
616 a line. If the line seems to have the form '# 123 filename'
617 .line and .file directives will appear in the pre-processed output. */
618 /* Note that input_file.c hand checks for '#' at the beginning of the
619 first line of the input file. This is because the compiler outputs
620 #NO_APP at the beginning of its output. */
621 /* Also note that comments like this one will always work. */
622 const char line_comment_chars[] = "#";
623
624 const char line_separator_chars[] = ";";
625
626 /* Chars that can be used to separate mant
627 from exp in floating point numbers. */
628 const char EXP_CHARS[] = "eE";
629
630 /* Chars that mean this number is a floating point constant. */
631 /* As in 0f12.456 */
632 /* or 0d1.2345e12 */
633
634 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
635
636 /* Prefix character that indicates the start of an immediate value. */
637 #define is_immediate_prefix(C) ((C) == '#')
638
639 /* Separator character handling. */
640
641 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
642
/* If **STR is the character C, consume it (advancing *STR) and return
   TRUE; otherwise leave *STR alone and return FALSE.  */

static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  (*str)++;
  return true;
}
654
655 #define skip_past_comma(str) skip_past_char (str, ',')
656
657 /* Arithmetic expressions (possibly involving symbols). */
658
659 static bool in_aarch64_get_expression = false;
660
661 /* Third argument to aarch64_get_expression. */
662 #define GE_NO_PREFIX false
663 #define GE_OPT_PREFIX true
664
665 /* Fourth argument to aarch64_get_expression. */
666 #define ALLOW_ABSENT false
667 #define REJECT_ABSENT true
668
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE, recording a syntax
   error, and leave *STR pointing at the text that stopped the parse.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily point the generic expression parser at *STR; the
     original input_line_pointer is restored on every exit path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* With an explicit '#' the operand was unambiguously meant to be
	 an immediate, so the failure is fatal for this template.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
735
736 /* Turn a string in input_line_pointer into a floating point constant
737 of type TYPE, and store the appropriate bytes in *LITP. The number
738 of LITTLENUMS emitted is stored in *SIZEP. An error message is
739 returned, or NULL on OK. */
740
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  All work is delegated to the generic IEEE
   helper, honouring the target's endianness.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
746
747 /* We handle all bad expressions here, so that we can report the faulty
748 instruction in the error message. */
749 void
750 md_operand (expressionS * exp)
751 {
752 if (in_aarch64_get_expression)
753 exp->X_op = O_illegal;
754 }
755
756 /* Immediate values. */
757
758 /* Errors may be set multiple times during parsing or bit encoding
759 (particularly in the Neon bits), but usually the earliest error which is set
760 will be the most meaningful. Avoid overwriting it with later (cascading)
761 errors by calling this function. */
762
/* Record ERROR as a syntax error unless an earlier (and usually more
   meaningful) error has already been recorded.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
769
770 /* Similar to first_error, but this function accepts formatted error
771 message. */
772 static void
773 first_error_fmt (const char *format, ...)
774 {
775 va_list args;
776 enum
777 { size = 100 };
778 /* N.B. this single buffer will not cause error messages for different
779 instructions to pollute each other; this is because at the end of
780 processing of each assembly line, error message if any will be
781 collected by as_bad. */
782 static char buffer[size];
783
784 if (! error_p ())
785 {
786 int ret ATTRIBUTE_UNUSED;
787 va_start (args, format);
788 ret = vsnprintf (buffer, size, format, args);
789 know (ret <= size - 1 && ret >= 0);
790 va_end (args);
791 set_syntax_error (buffer);
792 }
793 }
794
795 /* Internal helper routine converting a vector_type_el structure *VECTYPE
796 to a corresponding operand qualifier. */
797
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  On failure, records a syntax
   error and returns AARCH64_OPND_QLF_NIL.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type (NT_b..NT_q).  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Narrowest vector qualifier for each element type; wider
     arrangements of the same element type follow contiguously in the
     qualifier enumeration.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: S_B..S_Q parallel NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-bit, 64-bit and 128-bit total arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
869
870 /* Register parsing. */
871
872 /* Generic register parser which is called by other specialized
873 register parsers.
874 CCP points to what should be the beginning of a register name.
875 If it is indeed a valid register name, advance CCP over it and
876 return the reg_entry structure; otherwise return NULL.
877 It does not issue diagnostics. */
878
879 static reg_entry *
880 parse_reg (char **ccp)
881 {
882 char *start = *ccp;
883 char *p;
884 reg_entry *reg;
885
886 #ifdef REGISTER_PREFIX
887 if (*start != REGISTER_PREFIX)
888 return NULL;
889 start++;
890 #endif
891
892 p = start;
893 if (!ISALPHA (*p) || !is_name_beginner (*p))
894 return NULL;
895
896 do
897 p++;
898 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
899
900 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
901
902 if (!reg)
903 return NULL;
904
905 *ccp = p;
906 return reg;
907 }
908
909 /* Return the operand qualifier associated with all uses of REG, or
910 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
911 that qualifiers don't apply to REG or that qualifiers are added
912 using suffixes. */
913
914 static aarch64_opnd_qualifier_t
915 inherent_reg_qualifier (const reg_entry *reg)
916 {
917 switch (reg->type)
918 {
919 case REG_TYPE_R_32:
920 case REG_TYPE_SP_32:
921 case REG_TYPE_ZR_32:
922 return AARCH64_OPND_QLF_W;
923
924 case REG_TYPE_R_64:
925 case REG_TYPE_SP_64:
926 case REG_TYPE_ZR_64:
927 return AARCH64_OPND_QLF_X;
928
929 case REG_TYPE_FP_B:
930 case REG_TYPE_FP_H:
931 case REG_TYPE_FP_S:
932 case REG_TYPE_FP_D:
933 case REG_TYPE_FP_Q:
934 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
935
936 default:
937 return AARCH64_OPND_QLF_NIL;
938 }
939 }
940
941 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
942 return FALSE. */
943 static bool
944 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
945 {
946 return (reg_type_masks[type] & (1 << reg->type)) != 0;
947 }
948
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_Z:
      /* An SVE Z register is only acceptable if REG_TYPE allows it,
	 and it must be followed by an explicit ".s" or ".d" element
	 size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      /* Otherwise require a GP register, ZR or SP; its W/X qualifier
	 is implied by the register name itself.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  /* Commit the new input position only on success.  */
  *ccp = str;

  return reg;
}
997
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Thin wrapper: accept any GP register, ZR or SP (no SVE bases).  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
}
1009
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only AdvSIMD V registers may carry a leading element count; for
     any other register type go straight to the element-size letter
     with WIDTH left as 0 ("variable width").  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is accepted for non-V register types, and for V registers
	 only in the "1q" form; a plain or counted ".q"/".2q" etc. on a
	 V register falls through to the error below.  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* An explicit element count must describe a full 64-bit or 128-bit
     vector, except for the 32-bit "2h" and "4b" shapes.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1096
1097 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1098 *PARSED_TYPE and point *STR at the end of the suffix. */
1099
1100 static bool
1101 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1102 {
1103 char *ptr = *str;
1104
1105 /* Skip '/'. */
1106 gas_assert (*ptr == '/');
1107 ptr++;
1108 switch (TOLOWER (*ptr))
1109 {
1110 case 'z':
1111 parsed_type->type = NT_zero;
1112 break;
1113 case 'm':
1114 parsed_type->type = NT_merge;
1115 break;
1116 default:
1117 if (*ptr != '\0' && *ptr != ',')
1118 first_error_fmt (_("unexpected character `%c' in predication type"),
1119 *ptr);
1120 else
1121 first_error (_("missing predication type"));
1122 return false;
1123 }
1124 parsed_type->width = 0;
1125 *str = ptr + 1;
1126 return true;
1127 }
1128
1129 /* Return true if CH is a valid suffix character for registers of
1130 type TYPE. */
1131
1132 static bool
1133 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1134 {
1135 switch (type)
1136 {
1137 case REG_TYPE_V:
1138 case REG_TYPE_Z:
1139 case REG_TYPE_ZA:
1140 case REG_TYPE_ZAT:
1141 case REG_TYPE_ZATH:
1142 case REG_TYPE_ZATV:
1143 return ch == '.';
1144
1145 case REG_TYPE_P:
1146 case REG_TYPE_PN:
1147 return ch == '.' || ch == '/';
1148
1149 default:
1150 return false;
1151 }
1152 }
1153
1154 /* Parse an index expression at *STR, storing it in *IMM on success. */
1155
1156 static bool
1157 parse_index_expression (char **str, int64_t *imm)
1158 {
1159 expressionS exp;
1160
1161 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1162 if (exp.X_op != O_constant)
1163 {
1164 first_error (_("constant expression required"));
1165 return false;
1166 }
1167 *imm = exp.X_add_number;
1168 return true;
1169 }
1170
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  bool isalpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start with no type, shape or index information.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      /* Choose the most helpful diagnostic for the failed parse.  */
      if (!isalpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* From here on, use the concrete type of the register we found
     rather than the (possibly broader) requested class.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  /* '.' introduces a vector shape or element-size suffix.  */
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* The valid range of a ZA tile number shrinks as the element
	     size grows: tile number * 8 must stay below the element
	     size in bits.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* '/' introduces a zero/merge predication suffix.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      /* Inside a list the index is shared and parsed by the caller
	 after the closing brace.  */
      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1320
/* Parse register.

   Return the register on success; return null otherwise.

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register lists.  */

static const reg_entry *
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   struct vector_type_el *vectype)
{
  /* No PTR_* flags: not inside a list, and indices are honoured.  */
  return parse_typed_reg (ccp, type, vectype, 0);
}
1336
1337 static inline bool
1338 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1339 {
1340 return (e1.type == e2.type
1341 && e1.defined == e2.defined
1342 && e1.width == e2.width
1343 && e1.element_size == e2.element_size
1344 && e1.index == e2.index);
1345 }
1346
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  /* The list must be brace-enclosed.  */
  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;		/* Most recently parsed register number.  */
  val_range = -1;	/* First register of the current "Va - Vb" range.  */
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;	/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range must name at least two registers; register numbers
	     may wrap around modulo 32.  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & 0x1f;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Fold every register from VAL_RANGE through VAL into the
	   result, 5 bits per register number.  */
	for (;;)
	  {
	    ret_val |= val_range << (5 * nb_regs);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & 0x1f;
	  }
      in_range = 0;
      /* Once one register has parsed, treat the operand as a committed
	 match for diagnostic purposes.  */
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      /* At least one register required an index, so a shared
	 "[<index>]" must follow the closing brace.  */
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1505
1506 /* Directives: register aliases. */
1507
1508 static reg_entry *
1509 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1510 {
1511 reg_entry *new;
1512 const char *name;
1513
1514 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1515 {
1516 if (new->builtin)
1517 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1518 str);
1519
1520 /* Only warn about a redefinition if it's not defined as the
1521 same register. */
1522 else if (new->number != number || new->type != type)
1523 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1524
1525 return NULL;
1526 }
1527
1528 name = xstrdup (str);
1529 new = XNEW (reg_entry);
1530
1531 new->name = name;
1532 new->number = number;
1533 new->type = type;
1534 new->builtin = false;
1535
1536 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1537
1538 return new;
1539 }
1540
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the all-uppercase variant when it differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise only add the all-lowercase variant when distinct.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1620
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  If it is reached,
   the directive was misplaced; diagnose it.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1628
1629 /* The .unreq directive deletes an alias which was previously defined
1630 by .req. For example:
1631
1632 my_alias .req r11
1633 .unreq my_alias */
1634
1635 static void
1636 s_unreq (int a ATTRIBUTE_UNUSED)
1637 {
1638 char *name;
1639 char saved_char;
1640
1641 name = input_line_pointer;
1642 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1643 saved_char = *input_line_pointer;
1644 *input_line_pointer = 0;
1645
1646 if (!*name)
1647 as_bad (_("invalid syntax for .unreq directive"));
1648 else
1649 {
1650 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1651
1652 if (!reg)
1653 as_bad (_("unknown register alias '%s'"), name);
1654 else if (reg->builtin)
1655 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1656 name);
1657 else
1658 {
1659 char *p;
1660 char *nbuf;
1661
1662 str_hash_delete (aarch64_reg_hsh, name);
1663 free ((char *) reg->name);
1664 free (reg);
1665
1666 /* Also locate the all upper case and all lower case versions.
1667 Do not complain if we cannot find one or the other as it
1668 was probably deleted above. */
1669
1670 nbuf = strdup (name);
1671 for (p = nbuf; *p; p++)
1672 *p = TOUPPER (*p);
1673 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1674 if (reg)
1675 {
1676 str_hash_delete (aarch64_reg_hsh, nbuf);
1677 free ((char *) reg->name);
1678 free (reg);
1679 }
1680
1681 for (p = nbuf; *p; p++)
1682 *p = TOLOWER (*p);
1683 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1684 if (reg)
1685 {
1686 str_hash_delete (aarch64_reg_hsh, nbuf);
1687 free ((char *) reg->name);
1688 free (reg);
1689 }
1690
1691 free (nbuf);
1692 }
1693 }
1694
1695 *input_line_pointer = saved_char;
1696 demand_empty_rest_of_line ();
1697 }
1698
1699 /* Directives: Instruction set selection. */
1700
1701 #if defined OBJ_ELF || defined OBJ_COFF
1702 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1703 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1704 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1705 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1706
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Select the AAELF64 mapping-symbol name: $d for data, $x for A64
     instructions.  Both are untyped local symbols.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* A new symbol at the same offset supersedes the previous one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1762
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Mark [VALUE, VALUE + BYTES) within FRAG as data, then resume STATE
   immediately after it.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed was both the first and last map
	     of the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1790
1791 static void mapping_state_2 (enum mstate state, int max_chars);
1792
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Data that preceded the first instruction gets a $d at the very
	 start of the section.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1834
1835 /* Same as mapping_state, but MAX_CHARS bytes have already been
1836 allocated. Put the mapping symbol that far back. */
1837
1838 static void
1839 mapping_state_2 (enum mstate state, int max_chars)
1840 {
1841 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1842
1843 if (!SEG_NORMAL (now_seg))
1844 return;
1845
1846 if (mapstate == state)
1847 /* The mapping symbol has already been emitted.
1848 There is nothing else to do. */
1849 return;
1850
1851 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1852 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1853 }
1854 #else
1855 #define mapping_state(x) /* nothing */
1856 #define mapping_state_2(x, y) /* nothing */
1857 #endif
1858
1859 /* Directives: sectioning and alignment. */
1860
/* Handle the .bss directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* Anything emitted here counts as data for mapping symbols.  */
  mapping_state (MAP_DATA);
}
1870
/* Handle the .even directive: align the current position to a 2-byte
   boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1882
1883 /* Directives: Literal pools. */
1884
1885 static literal_pool *
1886 find_literal_pool (int size)
1887 {
1888 literal_pool *pool;
1889
1890 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1891 {
1892 if (pool->section == now_seg
1893 && pool->sub_section == now_subseg && pool->size == size)
1894 break;
1895 }
1896
1897 return pool;
1898 }
1899
/* Return the literal pool with entry size SIZE for the current section
   and subsection, creating it -- and its anchor symbol -- if needed.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW is xmalloc-based and aborts on allocation
	 failure, so this NULL check looks purely defensive -- confirm
	 before relying on a NULL return from this function.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      /* The symbol is given a real location later, by s_ltorg.  */
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1944
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success, *EXP is
   rewritten to refer to the pool entry (pool symbol plus byte offset).  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions must match symbol(s) and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as a reference into the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
2004
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the pre-created SYMBOLP the name NAME (copied), place it in
   SEGMENT with value VALU and fragment FRAG, and append it to the
   global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so it lives as long as the
     symbol does.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2055
2056
/* Handle the .ltorg directive: emit the pending literal pools for the
   current section/subsection at the current position, one pool per
   entry size (4, 8 and 16 bytes), then mark them empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Pool contents are data for mapping-symbol purposes.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The pool symbol's name embeds a control character (\002).  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's anchor symbol to the current position.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2115
2116 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2117 /* Forward declarations for functions below, in the MD interface
2118 section. */
2119 static struct reloc_table_entry * find_reloc_table_entry (char **);
2120
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle a data directive that emits NBYTES per comma-separated
   expression (.word/.xword style).  A ":reloc:" suffix on a symbolic
   operand is recognized but currently rejected as unimplemented.  */

static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* The emitted values are data for mapping-symbol purposes.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc:" suffix.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2176 #endif
2177
2178 #ifdef OBJ_ELF
2179 /* Forward declarations for functions below, in the MD interface
2180 section. */
2181 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2182
2183 /* Mark symbol that it follows a variant PCS convention. */
2184
static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  /* Parse the single symbol-name operand of the directive.  */
  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();

  /* Record the variant-PCS marking in the ELF st_other field of the
     underlying BFD symbol.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
2205 #endif /* OBJ_ELF */
2206
2207 /* Output a 32-bit word, but mark as an instruction. */
2208
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Count of 32-bit words emitted so far.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* An empty .inst emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  /* Each comma-separated operand must be a constant; each one is
     emitted as a single INSN_SIZE-byte instruction word.  */
  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* On a big-endian target the value is byte-swapped before
	 emission.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Let the DWARF line-number machinery account for the emitted code.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2263
2264 static void
2265 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2266 {
2267 demand_empty_rest_of_line ();
2268 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2269 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2270 }
2271
2272 #ifdef OBJ_ELF
2273 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2274
2275 static void
2276 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2277 {
2278 expressionS exp;
2279
2280 expression (&exp);
2281 frag_grow (4);
2282 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2283 BFD_RELOC_AARCH64_TLSDESC_ADD);
2284
2285 demand_empty_rest_of_line ();
2286 }
2287
2288 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2289
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  /* Attach the TLSDESC_CALL relocation at the current output position,
     i.e. on the BLR emitted next.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2307
2308 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2309
2310 static void
2311 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2312 {
2313 expressionS exp;
2314
2315 expression (&exp);
2316 frag_grow (4);
2317 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2318 BFD_RELOC_AARCH64_TLSDESC_LDR);
2319
2320 demand_empty_rest_of_line ();
2321 }
2322 #endif /* OBJ_ELF */
2323
2324 #ifdef TE_PE
2325 static void
2326 s_secrel (int dummy ATTRIBUTE_UNUSED)
2327 {
2328 expressionS exp;
2329
2330 do
2331 {
2332 expression (&exp);
2333 if (exp.X_op == O_symbol)
2334 exp.X_op = O_secrel;
2335
2336 emit_expr (&exp, 4);
2337 }
2338 while (*input_line_pointer++ == ',');
2339
2340 input_line_pointer--;
2341 demand_empty_rest_of_line ();
2342 }
2343
2344 void
2345 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2346 {
2347 expressionS exp;
2348
2349 exp.X_op = O_secrel;
2350 exp.X_add_symbol = symbol;
2351 exp.X_add_number = 0;
2352 emit_expr (&exp, size);
2353 }
2354
2355 static void
2356 s_secidx (int dummy ATTRIBUTE_UNUSED)
2357 {
2358 expressionS exp;
2359
2360 do
2361 {
2362 expression (&exp);
2363 if (exp.X_op == O_symbol)
2364 exp.X_op = O_secidx;
2365
2366 emit_expr (&exp, 2);
2367 }
2368 while (*input_line_pointer++ == ',');
2369
2370 input_line_pointer--;
2371 demand_empty_rest_of_line ();
2372 }
2373 #endif /* TE_PE */
2374
2375 static void s_aarch64_arch (int);
2376 static void s_aarch64_cpu (int);
2377 static void s_aarch64_arch_extension (int);
2378
2379 /* This table describes all the machine specific pseudo-ops the assembler
2380 has to support. The fields are:
2381 pseudo-op name without dot
2382 function to call to execute this pseudo-op
2383 Integer arg to pass to the function. */
2384
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg"; both flush the pending literal
     pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives; the integer argument is the element size passed
     to s_aarch64_cons.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision ('h') and bfloat16 ('b') floating-point constants.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2418 \f
2419
2420 /* Check whether STR points to a register name followed by a comma or the
2421 end of line; REG_TYPE indicates which register types are checked
2422 against. Return TRUE if STR is such a register name; otherwise return
2423 FALSE. The function does not intend to produce any diagnostics, but since
2424 the register parser aarch64_reg_parse, which is called by this function,
2425 does produce diagnostics, we call clear_error to clear any diagnostics
2426 that may be generated by aarch64_reg_parse.
2427 Also, the function returns FALSE directly if there is any user error
2428 present at the function entry. This prevents the existing diagnostics
2429 state from being spoiled.
2430 The function currently serves parse_constant_immediate and
2431 parse_big_immediate only. */
2432 static bool
2433 reg_name_p (char *str, aarch64_reg_type reg_type)
2434 {
2435 const reg_entry *reg;
2436
2437 /* Prevent the diagnostics state from being spoiled. */
2438 if (error_p ())
2439 return false;
2440
2441 reg = aarch64_reg_parse (&str, reg_type, NULL);
2442
2443 /* Clear the parsing error that may be set by the reg parser. */
2444 clear_error ();
2445
2446 if (!reg)
2447 return false;
2448
2449 skip_whitespace (str);
2450 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2451 return true;
2452
2453 return false;
2454 }
2455
2456 /* Parser functions used exclusively in instruction operands. */
2457
2458 /* Parse an immediate expression which may not be constant.
2459
2460 To prevent the expression parser from pushing a register name
2461 into the symbol table as an undefined symbol, firstly a check is
2462 done to find out whether STR is a register of type REG_TYPE followed
2463 by a comma or the end of line. Return FALSE if STR is such a string. */
2464
2465 static bool
2466 parse_immediate_expression (char **str, expressionS *exp,
2467 aarch64_reg_type reg_type)
2468 {
2469 if (reg_name_p (*str, reg_type))
2470 {
2471 set_recoverable_error (_("immediate operand required"));
2472 return false;
2473 }
2474
2475 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2476
2477 if (exp->X_op == O_absent)
2478 {
2479 set_fatal_syntax_error (_("missing immediate expression"));
2480 return false;
2481 }
2482
2483 return true;
2484 }
2485
2486 /* Constant immediate-value read function for use in insn parsing.
2487 STR points to the beginning of the immediate (with the optional
2488 leading #); *VAL receives the value. REG_TYPE says which register
2489 names should be treated as registers rather than as symbolic immediates.
2490
2491 Return TRUE on success; otherwise return FALSE. */
2492
2493 static bool
2494 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2495 {
2496 expressionS exp;
2497
2498 if (! parse_immediate_expression (str, &exp, reg_type))
2499 return false;
2500
2501 if (exp.X_op != O_constant)
2502 {
2503 set_syntax_error (_("constant expression required"));
2504 return false;
2505 }
2506
2507 *val = exp.X_add_number;
2508 return true;
2509 }
2510
/* Compress the IEEE single-precision encoding IMM into the 8-bit
   AArch64 floating-point immediate format: the sign bit plus the seven
   bits at [25:19].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7]   */
  return sign | low7;
}
2517
2518 /* Return TRUE if the single-precision floating-point value encoded in IMM
2519 can be expressed in the AArch64 8-bit signed floating-point format with
2520 3-bit exponent and normalized 4 bits of precision; in other words, the
2521 floating-point value must be expressable as
2522 (+/-) n / 16 * power (2, r)
2523 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2524
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* A single-precision value is expressible as an AArch64 8-bit FP
     immediate exactly when its bit pattern is

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  /* The low 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 29..25 must each equal the complement of bit 30 ('Eeeeee').  */
  uint32_t expected = (imm & 0x40000000) != 0 ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == expected;
}
2550
2551 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2552 as an IEEE float without any loss of precision. Store the value in
2553 *FPWORD if so. */
2554
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double-precision bit pattern converts exactly to a float when it
     has the shape

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is
     the inverse of E.  */

  uint32_t top = imm >> 32;
  uint32_t bottom = (uint32_t) imm;

  /* The 29 trailing significand bits must all be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three bits below the exponent's leading bit must be its
     complement (the E~~~ requirement above).  */
  uint32_t expected = ((top >> 30) & 0x1) != 0 ? 0x40000000 : 0x38000000;
  if ((top & 0x78000000) != expected)
    return false;

  /* Reject exponents that would need the all-ones float exponent
     (Eeee_eeee == 1111_1111).  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  /* Repack: sign and leading exponent bit, then 7 exponent plus 20
     significand bits, then the 3 significand bits of the low word.  */
  *fpword = ((top & 0xc0000000)
	     | ((top << 3) & 0x3ffffff8)
	     | (bottom >> 29));
  return true;
}
2598
2599 /* Return true if we should treat OPERAND as a double-precision
2600 floating-point operand rather than a single-precision one. */
2601 static bool
2602 double_precision_operand_p (const aarch64_opnd_info *operand)
2603 {
2604 /* Check for unsuffixed SVE registers, which are allowed
2605 for LDR and STR but not in instructions that require an
2606 immediate. We get better error messages if we arbitrarily
2607 pick one size, parse the immediate normally, and then
2608 report the match failure in the normal way. */
2609 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2610 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2611 }
2612
2613 /* Parse a floating-point immediate. Return TRUE on success and return the
2614 value in *IMMED in the format of IEEE754 single-precision encoding.
2615 *CCP points to the start of the string; DP_P is TRUE when the immediate
2616 is expected to be in double-precision (N.B. this only matters when
2617 hexadecimal representation is involved). REG_TYPE says which register
2618 names should be treated as registers rather than as symbolic immediates.
2619
2620 This routine accepts any IEEE float; it is up to the callers to reject
2621 invalid ones. */
2622
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;	/* True when a raw 0x... IEEE encoding was given.  */

  /* The leading '#' of an immediate is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to a float; reject values that
	     would lose precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* More than 32 bits cannot be a single-precision encoding.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name where an immediate is expected.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2688
2689 /* Less-generic immediate-value read function with the possibility of loading
2690 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2691 instructions.
2692
2693 To prevent the expression parser from pushing a register name into the
2694 symbol table as an undefined symbol, a check is firstly done to find
2695 out whether STR is a register of type REG_TYPE followed by a comma or
2696 the end of line. Return FALSE if STR is such a register. */
2697
2698 static bool
2699 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2700 {
2701 char *ptr = *str;
2702
2703 if (reg_name_p (ptr, reg_type))
2704 {
2705 set_syntax_error (_("immediate operand required"));
2706 return false;
2707 }
2708
2709 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2710
2711 if (inst.reloc.exp.X_op == O_constant)
2712 *imm = inst.reloc.exp.X_add_number;
2713
2714 *str = ptr;
2715
2716 return true;
2717 }
2718
2719 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2720 if NEED_LIBOPCODES is non-zero, the fixup will need
2721 assistance from the libopcodes. */
2722
2723 static inline void
2724 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2725 const aarch64_opnd_info *operand,
2726 int need_libopcodes_p)
2727 {
2728 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2729 reloc->opnd = operand->type;
2730 if (need_libopcodes_p)
2731 reloc->need_libopcodes_p = 1;
2732 };
2733
2734 /* Return TRUE if the instruction needs to be fixed up later internally by
2735 the GAS; otherwise return FALSE. */
2736
static inline bool
aarch64_gas_internal_fixup_p (void)
{
  /* Checks the relocation type recorded in the global `inst' for the
     instruction currently being assembled.  */
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2742
2743 /* Assign the immediate value to the relevant field in *OPERAND if
2744 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2745 needs an internal fixup in a later stage.
2746 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2747 IMM.VALUE that may get assigned with the constant. */
2748 static inline void
2749 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2750 aarch64_opnd_info *operand,
2751 int addr_off_p,
2752 int need_libopcodes_p,
2753 int skip_p)
2754 {
2755 if (reloc->exp.X_op == O_constant)
2756 {
2757 if (addr_off_p)
2758 operand->addr.offset.imm = reloc->exp.X_add_number;
2759 else
2760 operand->imm.value = reloc->exp.X_add_number;
2761 reloc->type = BFD_RELOC_UNUSED;
2762 }
2763 else
2764 {
2765 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2766 /* Tell libopcodes to ignore this operand or not. This is helpful
2767 when one of the operands needs to be fixed up later but we need
2768 libopcodes to check the other operands. */
2769 operand->skip = skip_p;
2770 }
2771 }
2772
2773 /* Relocation modifiers. Each entry in the table contains the textual
2774 name for the relocation which may be placed before a symbol used as
2775 a load/store offset, or add immediate. It must be surrounded by a
2776 leading and trailing colon, for example:
2777
2778 ldr x0, [x1, #:rello:varsym]
2779 add x0, x1, #:rello:varsym */
2780
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  /* Relocation to use when the modified symbol is the operand of each
     instruction class; 0 where the modifier is not permitted there.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Load literal.  */
};
2792
/* Table of the recognized relocation modifiers.  Column order after
   PC_REL follows struct reloc_table_entry: adr_type, adrp_type,
   movw_type, add_type, ldst_type, ld_literal_type; 0 means the
   modifier is not valid with that instruction class.  */
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3321
3322 /* Given the address of a pointer pointing to the textual name of a
3323 relocation as may appear in assembler source, attempt to find its
3324 details in reloc_table. The pointer will be updated to the character
3325 after the trailing colon. On failure, NULL will be returned;
3326 otherwise return the reloc_table_entry. */
3327
3328 static struct reloc_table_entry *
3329 find_reloc_table_entry (char **str)
3330 {
3331 unsigned int i;
3332 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3333 {
3334 int length = strlen (reloc_table[i].name);
3335
3336 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3337 && (*str)[length] == ':')
3338 {
3339 *str += (length + 1);
3340 return &reloc_table[i];
3341 }
3342 }
3343
3344 return NULL;
3345 }
3346
/* Classify relocation TYPE for aarch64_force_relocation below.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (i.e. defer to the generic heuristics).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

      /* GOT, TLS and PC-relative page/offset relocations: their final
	 values depend on link-time layout, so they must always survive
	 to the linker regardless of how the symbol resolves now.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No opinion: let generic_force_reloc decide.  */
      return -1;
    }
}
3449
3450 int
3451 aarch64_force_relocation (struct fix *fixp)
3452 {
3453 int res = aarch64_force_reloc (fixp->fx_r_type);
3454
3455 if (res == -1)
3456 return generic_force_reloc (fixp);
3457 return res;
3458 }
3459
/* Mode argument to parse_shift and parser_shifter_operand.  Each value
   selects which shift/extend operators (and which syntax for the shift
   amount) are acceptable at the current parse position.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3474
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE restricts which shift/extend kinds and amount syntaxes are
   accepted (see enum parse_shift_mode).  On success, record the shift
   kind and amount in OPERAND->shifter and advance *STR past the
   operator and amount; on failure, record a (possibly fatal) syntax
   error and leave *STR unchanged.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Gather the alphabetic operator token at the parse position.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the token up in the table of known shift/extend operators.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check the operator kind against what MODE allows.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A register-offset shifter may legitimately
     have no amount (the closing ']' follows); "MUL VL" never takes
     an explicit amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only acceptable for extend operators
	 without a '#' prefix (e.g. "uxtw"); they default to 0.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3647
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   The immediate expression is stored in inst.reloc.exp; validation of
   immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only arithmetic and logical immediates reach this routine.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Don't accept any shifter for logical immediate values.  */
  /* NOTE(review): if the text after the comma is not a valid shift,
     parse_shift records an error but this function still returns true
     with *str positioned after the comma — presumably the leftover
     text is diagnosed later; confirm this is intentional.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3689
3690 /* Parse a <shifter_operand> for a data processing instruction:
3691
3692 <Rm>
3693 <Rm>, <shift>
3694 #<immediate>
3695 #<immediate>, LSL #imm
3696
3697 where <shift> is handled by parse_shift above, and the last two
3698 cases are handled by the function above.
3699
3700 Validation of immediate operands is deferred to md_apply_fix.
3701
3702 Return TRUE on success; otherwise return FALSE. */
3703
3704 static bool
3705 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3706 enum parse_shift_mode mode)
3707 {
3708 const reg_entry *reg;
3709 aarch64_opnd_qualifier_t qualifier;
3710 enum aarch64_operand_class opd_class
3711 = aarch64_get_operand_class (operand->type);
3712
3713 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3714 if (reg)
3715 {
3716 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3717 {
3718 set_syntax_error (_("unexpected register in the immediate operand"));
3719 return false;
3720 }
3721
3722 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3723 {
3724 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3725 return false;
3726 }
3727
3728 operand->reg.regno = reg->number;
3729 operand->qualifier = qualifier;
3730
3731 /* Accept optional shift operation on register. */
3732 if (! skip_past_comma (str))
3733 return true;
3734
3735 if (! parse_shift (str, operand, mode))
3736 return false;
3737
3738 return true;
3739 }
3740 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3741 {
3742 set_syntax_error
3743 (_("integer register expected in the extended/shifted operand "
3744 "register"));
3745 return false;
3746 }
3747
3748 /* We have a shifted immediate variable. */
3749 return parse_shifter_operand_imm (str, operand, mode);
3750 }
3751
/* Parse a <shifter_operand> that may be prefixed by a relocation
   modifier of the form "#:relop:" or ":relop:" (e.g. ":lo12:sym").
   If a modifier is present, record its ADD-variant relocation type in
   inst.reloc and parse the following expression; otherwise defer to
   parse_shifter_operand.
   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' and/or ':' introducing the modifier.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3813
3814 /* Parse all forms of an address expression. Information is written
3815 to *OPERAND and/or inst.reloc.
3816
3817 The A64 instruction set has the following addressing modes:
3818
3819 Offset
3820 [base] // in SIMD ld/st structure
3821 [base{,#0}] // in ld/st exclusive
3822 [base{,#imm}]
3823 [base,Xm{,LSL #imm}]
3824 [base,Xm,SXTX {#imm}]
3825 [base,Wm,(S|U)XTW {#imm}]
3826 Pre-indexed
3827 [base]! // in ldraa/ldrab exclusive
3828 [base,#imm]!
3829 Post-indexed
3830 [base],#imm
3831 [base],Xm // in SIMD ld/st structure
3832 PC-relative (literal)
3833 label
3834 SVE:
3835 [base,#imm,MUL VL]
3836 [base,Zm.D{,LSL #imm}]
3837 [base,Zm.S,(S|U)XTW {#imm}]
3838 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3839 [Zn.S,#imm]
3840 [Zn.D,#imm]
3841 [Zn.S{, Xm}]
3842 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3843 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3844 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3845
3846 (As a convenience, the notation "=immediate" is permitted in conjunction
3847 with the pc-relative literal load instructions to automatically place an
3848 immediate value or symbolic address in a nearby literal pool and generate
3849 a hidden label which references it.)
3850
3851 Upon a successful parsing, the address structure in *OPERAND will be
3852 filled in the following way:
3853
3854 .base_regno = <base>
3855 .offset.is_reg // 1 if the offset is a register
3856 .offset.imm = <imm>
3857 .offset.regno = <Rm>
3858
3859 For different addressing modes defined in the A64 ISA:
3860
3861 Offset
3862 .pcrel=0; .preind=1; .postind=0; .writeback=0
3863 Pre-indexed
3864 .pcrel=0; .preind=1; .postind=0; .writeback=1
3865 Post-indexed
3866 .pcrel=0; .preind=0; .postind=1; .writeback=1
3867 PC-relative (literal)
3868 .pcrel=1; .preind=1; .postind=0; .writeback=0
3869
3870 The shift/extension information, if any, will be stored in .shifter.
3871 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3872 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3873 corresponding register.
3874
3875 BASE_TYPE says which types of base register should be accepted and
3876 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3877 is the type of shifter that is allowed for immediate offsets,
3878 or SHIFTED_NONE if none.
3879
3880 In all other respects, it is the caller's responsibility to check
3881 for addressing modes not supported by the instruction, and to set
3882 inst.reloc.type. */
3883
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form, i.e.
	 =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the reloc variant appropriate for the instruction:
	     ADR uses the adr type, everything else the literal-load
	     type.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base position even looked like a register
     name, to pick the most helpful diagnostic below.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifter kinds require a 64-bit offset register,
		 matching the base register's element size (with the
		 SVE2 vector-plus-scalar [Zn.S, Xm] exception).  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset, optionally followed by a
		 shifter when IMM_SHIFT_MODE allows one (e.g. MUL VL
		 for SVE).  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Treat [Rn] as [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4190
4191 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4192 on success. */
4193 static bool
4194 parse_address (char **str, aarch64_opnd_info *operand)
4195 {
4196 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4197 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4198 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4199 }
4200
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   The base and offset qualifiers are reported through *BASE_QUALIFIER
   and *OFFSET_QUALIFIER since SVE callers need them for validation.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4213
4214 /* Parse a register X0-X30. The register must be 64-bit and register 31
4215 is unallocated. */
4216 static bool
4217 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4218 {
4219 const reg_entry *reg = parse_reg (str);
4220 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4221 {
4222 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4223 return false;
4224 }
4225 operand->reg.regno = reg->number;
4226 operand->qualifier = AARCH64_OPND_QLF_X;
4227 return true;
4228 }
4229
/* Parse an operand for a MOVZ, MOVN or MOVK instruction: either a
   ":relop:"-prefixed expression whose MOVW relocation type is recorded
   in inst.reloc, or a plain expression, in which case *INTERNAL_FIXUP_P
   is set to request an internal fixup.  The expression itself is stored
   in inst.reloc.exp.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* An optional immediate prefix may precede the modifier/expression.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    *internal_fixup_p = 1;

  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;

  *str = p;
  return true;
}
4273
/* Parse an operand for an ADRP instruction:
     ADRP <Xd>, <label>
   The label may carry a ":relop:" prefix selecting an ADRP relocation
   type; otherwise the default page-relative type is used.  The parsed
   expression and relocation are recorded in inst.reloc.
   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP targets are always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;
  *str = p;
  return true;
}
4314
4315 /* Miscellaneous. */
4316
4317 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4318 of SIZE tokens in which index I gives the token for field value I,
4319 or is null if field value I is invalid. REG_TYPE says which register
4320 names should be treated as registers rather than as symbolic immediates.
4321
4322 Return true on success, moving *STR past the operand and storing the
4323 field value in *VAL. */
4324
4325 static int
4326 parse_enum_string (char **str, int64_t *val, const char *const *array,
4327 size_t size, aarch64_reg_type reg_type)
4328 {
4329 expressionS exp;
4330 char *p, *q;
4331 size_t i;
4332
4333 /* Match C-like tokens. */
4334 p = q = *str;
4335 while (ISALNUM (*q))
4336 q++;
4337
4338 for (i = 0; i < size; ++i)
4339 if (array[i]
4340 && strncasecmp (array[i], p, q - p) == 0
4341 && array[i][q - p] == 0)
4342 {
4343 *val = i;
4344 *str = q;
4345 return true;
4346 }
4347
4348 if (!parse_immediate_expression (&p, &exp, reg_type))
4349 return false;
4350
4351 if (exp.X_op == O_constant
4352 && (uint64_t) exp.X_add_number < size)
4353 {
4354 *val = exp.X_add_number;
4355 *str = p;
4356 return true;
4357 }
4358
4359 /* Use the default error for this operand. */
4360 return false;
4361 }
4362
4363 /* Parse an option for a preload instruction. Returns the encoding for the
4364 option, or PARSE_FAIL. */
4365
4366 static int
4367 parse_pldop (char **str)
4368 {
4369 char *p, *q;
4370 const struct aarch64_name_value_pair *o;
4371
4372 p = q = *str;
4373 while (ISALNUM (*q))
4374 q++;
4375
4376 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4377 if (!o)
4378 return PARSE_FAIL;
4379
4380 *str = q;
4381 return o->value;
4382 }
4383
4384 /* Parse an option for a barrier instruction. Returns the encoding for the
4385 option, or PARSE_FAIL. */
4386
4387 static int
4388 parse_barrier (char **str)
4389 {
4390 char *p, *q;
4391 const struct aarch64_name_value_pair *o;
4392
4393 p = q = *str;
4394 while (ISALPHA (*q))
4395 q++;
4396
4397 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4398 if (!o)
4399 return PARSE_FAIL;
4400
4401 *str = q;
4402 return o->value;
4403 }
4404
4405 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4406 return 0 if successful. Otherwise return PARSE_FAIL. */
4407
4408 static int
4409 parse_barrier_psb (char **str,
4410 const struct aarch64_name_value_pair ** hint_opt)
4411 {
4412 char *p, *q;
4413 const struct aarch64_name_value_pair *o;
4414
4415 p = q = *str;
4416 while (ISALPHA (*q))
4417 q++;
4418
4419 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4420 if (!o)
4421 {
4422 set_fatal_syntax_error
4423 ( _("unknown or missing option to PSB/TSB"));
4424 return PARSE_FAIL;
4425 }
4426
4427 if (o->value != 0x11)
4428 {
4429 /* PSB only accepts option name 'CSYNC'. */
4430 set_syntax_error
4431 (_("the specified option is not accepted for PSB/TSB"));
4432 return PARSE_FAIL;
4433 }
4434
4435 *str = q;
4436 *hint_opt = o;
4437 return 0;
4438 }
4439
4440 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4441 return 0 if successful. Otherwise return PARSE_FAIL. */
4442
4443 static int
4444 parse_bti_operand (char **str,
4445 const struct aarch64_name_value_pair ** hint_opt)
4446 {
4447 char *p, *q;
4448 const struct aarch64_name_value_pair *o;
4449
4450 p = q = *str;
4451 while (ISALPHA (*q))
4452 q++;
4453
4454 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4455 if (!o)
4456 {
4457 set_fatal_syntax_error
4458 ( _("unknown option to BTI"));
4459 return PARSE_FAIL;
4460 }
4461
4462 switch (o->value)
4463 {
4464 /* Valid BTI operands. */
4465 case HINT_OPD_C:
4466 case HINT_OPD_J:
4467 case HINT_OPD_JC:
4468 break;
4469
4470 default:
4471 set_syntax_error
4472 (_("unknown option to BTI"));
4473 return PARSE_FAIL;
4474 }
4475
4476 *str = q;
4477 *hint_opt = o;
4478 return 0;
4479 }
4480
4481 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4482 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4483 on failure. Format:
4484
4485 REG_TYPE.QUALIFIER
4486
4487 Side effect: Update STR with current parse position of success.
4488
4489 FLAGS is as for parse_typed_reg. */
4490
4491 static const reg_entry *
4492 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4493 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4494 {
4495 struct vector_type_el vectype;
4496 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4497 PTR_FULL_REG | flags);
4498 if (!reg)
4499 return NULL;
4500
4501 if (vectype.type == NT_invtype)
4502 *qualifier = AARCH64_OPND_QLF_NIL;
4503 else
4504 {
4505 *qualifier = vectype_to_qualifier (&vectype);
4506 if (*qualifier == AARCH64_OPND_QLF_NIL)
4507 return NULL;
4508 }
4509
4510 return reg;
4511 }
4512
4513 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4514
4515 #<imm>
4516 <imm>
4517
   Function returns TRUE if an immediate was found, or FALSE.
4519 */
4520 static bool
4521 parse_sme_immediate (char **str, int64_t *imm)
4522 {
4523 int64_t val;
4524 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4525 return false;
4526
4527 *imm = val;
4528 return true;
4529 }
4530
4531 /* Parse index with selection register and immediate offset:
4532
4533 [<Wv>, <imm>]
4534 [<Wv>, #<imm>]
4535
4536 Return true on success, populating OPND with the parsed index. */
4537
static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* An optional "<imm>:<imm>" range; the second immediate must be
     strictly greater than the first.  Only the count minus one is
     recorded, not the end offset itself.  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* An optional trailing ", vgx2" or ", vgx4" vector group size.  The
     ISALPHA check rejects longer identifiers that merely start with
     "vgx2"/"vgx4".  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4621
4622 /* Parse a register of type REG_TYPE that might have an element type
4623 qualifier and that is indexed by two values: a 32-bit register,
4624 followed by an immediate. The ranges of the register and the
4625 immediate vary by opcode and are checked in libopcodes.
4626
4627 Return true on success, populating OPND with information about
4628 the operand and setting QUALIFIER to the register qualifier.
4629
4630 Field format examples:
4631
     <Pm>.<T>[<Wv>, #<imm>]
4633 ZA[<Wv>, #<imm>]
4634 <ZAn><HV>.<T>[<Wv>, #<imm>]
4635
4636 FLAGS is as for parse_typed_reg. */
4637
4638 static bool
4639 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4640 struct aarch64_indexed_za *opnd,
4641 aarch64_opnd_qualifier_t *qualifier,
4642 unsigned int flags)
4643 {
4644 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4645 if (!reg)
4646 return false;
4647
4648 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4649 opnd->regno = reg->number;
4650
4651 return parse_sme_za_index (str, opnd);
4652 }
4653
4654 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4655 operand. */
4656
4657 static bool
4658 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4659 struct aarch64_indexed_za *opnd,
4660 aarch64_opnd_qualifier_t *qualifier)
4661 {
4662 if (!skip_past_char (str, '{'))
4663 {
4664 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4665 return false;
4666 }
4667
4668 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4669 PTR_IN_REGLIST))
4670 return false;
4671
4672 if (!skip_past_char (str, '}'))
4673 {
4674 set_syntax_error (_("expected '}'"));
4675 return false;
4676 }
4677
4678 return true;
4679 }
4680
4681 /* Parse list of up to eight 64-bit element tile names separated by commas in
4682 SME's ZERO instruction:
4683
4684 ZERO { <mask> }
4685
4686 Function returns <mask>:
4687
4688 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4689 */
4690 static int
4691 parse_sme_zero_mask(char **str)
4692 {
4693 char *q;
4694 int mask;
4695 aarch64_opnd_qualifier_t qualifier;
4696 unsigned int ptr_flags = PTR_IN_REGLIST;
4697
4698 mask = 0x00;
4699 q = *str;
4700 do
4701 {
4702 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4703 &qualifier, ptr_flags);
4704 if (!reg)
4705 return PARSE_FAIL;
4706
4707 if (reg->type == REG_TYPE_ZA)
4708 {
4709 if (qualifier != AARCH64_OPND_QLF_NIL)
4710 {
4711 set_syntax_error ("ZA should not have a size suffix");
4712 return PARSE_FAIL;
4713 }
4714 /* { ZA } is assembled as all-ones immediate. */
4715 mask = 0xff;
4716 }
4717 else
4718 {
4719 int regno = reg->number;
4720 if (qualifier == AARCH64_OPND_QLF_S_B)
4721 {
4722 /* { ZA0.B } is assembled as all-ones immediate. */
4723 mask = 0xff;
4724 }
4725 else if (qualifier == AARCH64_OPND_QLF_S_H)
4726 mask |= 0x55 << regno;
4727 else if (qualifier == AARCH64_OPND_QLF_S_S)
4728 mask |= 0x11 << regno;
4729 else if (qualifier == AARCH64_OPND_QLF_S_D)
4730 mask |= 0x01 << regno;
4731 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4732 {
4733 set_syntax_error (_("ZA tile masks do not operate at .Q"
4734 " granularity"));
4735 return PARSE_FAIL;
4736 }
4737 else if (qualifier == AARCH64_OPND_QLF_NIL)
4738 {
4739 set_syntax_error (_("missing ZA tile size"));
4740 return PARSE_FAIL;
4741 }
4742 else
4743 {
4744 set_syntax_error (_("invalid ZA tile"));
4745 return PARSE_FAIL;
4746 }
4747 }
4748 ptr_flags |= PTR_GOOD_MATCH;
4749 }
4750 while (skip_past_char (&q, ','));
4751
4752 *str = q;
4753 return mask;
4754 }
4755
4756 /* Wraps in curly braces <mask> operand ZERO instruction:
4757
4758 ZERO { <mask> }
4759
4760 Function returns value of <mask> bit-field.
4761 */
4762 static int
4763 parse_sme_list_of_64bit_tiles (char **str)
4764 {
4765 int regno;
4766
4767 if (!skip_past_char (str, '{'))
4768 {
4769 set_syntax_error (_("expected '{'"));
4770 return PARSE_FAIL;
4771 }
4772
4773 /* Empty <mask> list is an all-zeros immediate. */
4774 if (!skip_past_char (str, '}'))
4775 {
4776 regno = parse_sme_zero_mask (str);
4777 if (regno == PARSE_FAIL)
4778 return PARSE_FAIL;
4779
4780 if (!skip_past_char (str, '}'))
4781 {
4782 set_syntax_error (_("expected '}'"));
4783 return PARSE_FAIL;
4784 }
4785 }
4786 else
4787 regno = 0x00;
4788
4789 return regno;
4790 }
4791
4792 /* Parse streaming mode operand for SMSTART and SMSTOP.
4793
4794 {SM | ZA}
4795
   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
4797 */
4798 static int
4799 parse_sme_sm_za (char **str)
4800 {
4801 char *p, *q;
4802
4803 p = q = *str;
4804 while (ISALPHA (*q))
4805 q++;
4806
4807 if ((q - p != 2)
4808 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4809 {
4810 set_syntax_error (_("expected SM or ZA operand"));
4811 return PARSE_FAIL;
4812 }
4813
4814 *str = q;
4815 return TOLOWER (p[0]);
4816 }
4817
4818 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4819 Returns the encoding for the option, or PARSE_FAIL.
4820
4821 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4822 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4823
4824 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4825 field, otherwise as a system register.
4826 */
4827
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot appear in a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Reject fields that overflow their widths: op0 is 2 bits,
	     op1/op2 are 3 bits, Cn/Cm are 4 bits.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit system register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name: still accept it, but diagnose names that the
	 selected processor does not support or that are deprecated.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4891
4892 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4893 for the option, or NULL. */
4894
4895 static const aarch64_sys_ins_reg *
4896 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4897 {
4898 char *p, *q;
4899 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4900 const aarch64_sys_ins_reg *o;
4901
4902 p = buf;
4903 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4904 if (p < buf + (sizeof (buf) - 1))
4905 *p++ = TOLOWER (*q);
4906 *p = '\0';
4907
4908 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4909 valid system register. This is enforced by construction of the hash
4910 table. */
4911 if (p - buf != q - *str)
4912 return NULL;
4913
4914 o = str_hash_find (sys_ins_regs, buf);
4915 if (!o)
4916 return NULL;
4917
4918 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4919 o->name, o->value, o->flags, 0))
4920 as_bad (_("selected processor does not support system register "
4921 "name '%s'"), buf);
4922 if (aarch64_sys_reg_deprecated_p (o->flags))
4923 as_warn (_("system register name '%s' is deprecated and may be "
4924 "removed in a future release"), buf);
4925
4926 *str = q;
4927 return o;
4928 }
4929 \f
/* Convenience macros for the operand-parsing code: each parses one
   syntactic element and jumps to the enclosing function's local
   "failure" label when the element is absent or malformed.  They rely
   on locals (str, reg, val, info, imm_reg_type) being in scope at the
   point of expansion.  */

#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4978 \f
/* A primitive log calculator: return floor (log2 (N)) for N > 1,
   otherwise 0.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int result;

  for (result = 0; n > 1; n >>= 1)
    result++;

  return result;
}
4992
/* Encode the 12-bit imm field of Add/sub immediate: the value occupies
   bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4999
/* Encode the shift amount field of Add/sub immediate: bits [23:22].  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
5006
5007
/* Encode the imm field of an Adr instruction: the low two bits go to
   immlo (bits [30:29]) and the remaining 19 bits to immhi (bits
   [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* [1:0] -> [30:29] */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5] */
  return immlo | immhi;
}
5015
/* Encode the immediate field of Move wide immediate: bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
5022
/* Encode the 26-bit offset of unconditional branch: only the low 26
   bits of OFS are kept (bits [25:0]).  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & ((1 << 26) - 1);
}
5029
/* Encode the 19-bit offset of conditional branch and compare & branch:
   the low 19 bits of OFS are placed at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}
5036
/* Encode the 19-bit offset of ld literal: the low 19 bits of OFS are
   placed at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}
5043
/* Encode the 14-bit offset of test & branch: the low 14 bits of OFS are
   placed at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & ((1 << 14) - 1)) << 5;
}
5050
/* Encode the 16-bit imm field of svc/hvc/smc: bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
5057
/* Reencode add(s) to sub(s), or sub(s) to add(s), by flipping the op
   bit (bit 30) of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1 << 30);
}
5064
/* Force a MOVZ/MOVN-class opcode to the MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1 << 30);
}
5070
/* Force a MOVZ/MOVN-class opcode to the MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1 << 30);
}
5076
5077 /* Overall per-instruction processing. */
5078
5079 /* We need to be able to fix up arbitrary expressions in some statements.
5080 This is so that we can handle symbols that are an arbitrary distance from
5081 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5082 which returns part of an address in a form which will be valid for
5083 a data instruction. We do this by pushing the expression into a symbol
5084 in the expr_section, and creating a fix for that. */
5085
5086 static fixS *
5087 fix_new_aarch64 (fragS * frag,
5088 int where,
5089 short int size,
5090 expressionS * exp,
5091 int pc_rel,
5092 int reloc)
5093 {
5094 fixS *new_fix;
5095
5096 switch (exp->X_op)
5097 {
5098 case O_constant:
5099 case O_symbol:
5100 case O_add:
5101 case O_subtract:
5102 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5103 break;
5104
5105 default:
5106 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5107 pc_rel, reloc);
5108 break;
5109 }
5110 return new_fix;
5111 }
5112 \f
5113 /* Diagnostics on operands errors. */
5114
5115 /* By default, output verbose error message.
5116 Disable the verbose error message by -mno-verbose-error. */
5117 static int verbose_error_p = 1;
5118
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed
   by enum aarch64_operand_error_kind, so the order here is expected to
   mirror the enum's declaration order -- verify when adding kinds.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5141
5142 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
5143
5144 When multiple errors of different kinds are found in the same assembly
5145 line, only the error of the highest severity will be picked up for
5146 issuing the diagnostics. */
5147
static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies purely on the numeric order of the
     enum; these asserts document (and check) the severity ordering the
     enum declaration is expected to follow.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5168
5169 /* Helper routine to get the mnemonic name from the assembly instruction
5170 line; should only be called for the diagnosis purpose, as there is
5171 string copy operation involved, which may affect the runtime
5172 performance if used in elsewhere. */
5173
static const char*
get_mnemonic_name (const char *str)
{
  /* N.B. the returned pointer refers to this static buffer, which is
     overwritten by the next call.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5197
5198 static void
5199 reset_aarch64_instruction (aarch64_instruction *instruction)
5200 {
5201 memset (instruction, '\0', sizeof (aarch64_instruction));
5202 instruction->reloc.type = BFD_RELOC_UNUSED;
5203 }
5204
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One diagnosed operand error, remembered together with the opcode
   template it was found against.  Records form a singly-linked list.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of a list of operand_error_record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled by init_operand_error_report.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5236
5237 /* Initialize the data structure that stores the operand mismatch
5238 information on assembling one line of the assembly code. */
5239 static void
5240 init_operand_error_report (void)
5241 {
5242 if (operand_error_report.head != NULL)
5243 {
5244 gas_assert (operand_error_report.tail != NULL);
5245 operand_error_report.tail->next = free_opnd_error_record_nodes;
5246 free_opnd_error_record_nodes = operand_error_report.head;
5247 operand_error_report.head = NULL;
5248 operand_error_report.tail = NULL;
5249 return;
5250 }
5251 gas_assert (operand_error_report.tail == NULL);
5252 }
5253
5254 /* Return TRUE if some operand error has been recorded during the
5255 parsing of the current assembly line using the opcode *OPCODE;
5256 otherwise return FALSE. */
5257 static inline bool
5258 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5259 {
5260 operand_error_record *record = operand_error_report.head;
5261 return record && record->opcode == opcode;
5262 }
5263
5264 /* Add the error record *NEW_RECORD to operand_error_report. The record's
5265 OPCODE field is initialized with OPCODE.
5266 N.B. only one record for each opcode, i.e. the maximum of one error is
5267 recorded for each instruction template. */
5268
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record, either recycled from the free list or
	 freshly allocated.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a fresh record, or the new error replaces the old one.  */
  record->detail = new_record->detail;
}
5315
5316 static inline void
5317 record_operand_error_info (const aarch64_opcode *opcode,
5318 aarch64_operand_error *error_info)
5319 {
5320 operand_error_record record;
5321 record.opcode = opcode;
5322 record.detail = *error_info;
5323 add_operand_error_record (&record);
5324 }
5325
5326 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5327 error message *ERROR, for operand IDX (count from 0). */
5328
5329 static void
5330 record_operand_error (const aarch64_opcode *opcode, int idx,
5331 enum aarch64_operand_error_kind kind,
5332 const char* error)
5333 {
5334 aarch64_operand_error info;
5335 memset(&info, 0, sizeof (info));
5336 info.index = idx;
5337 info.kind = kind;
5338 info.error = error;
5339 info.non_fatal = false;
5340 record_operand_error_info (opcode, &info);
5341 }
5342
5343 static void
5344 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5345 enum aarch64_operand_error_kind kind,
5346 const char* error, const int *extra_data)
5347 {
5348 aarch64_operand_error info;
5349 info.index = idx;
5350 info.kind = kind;
5351 info.error = error;
5352 info.data[0].i = extra_data[0];
5353 info.data[1].i = extra_data[1];
5354 info.data[2].i = extra_data[2];
5355 info.non_fatal = false;
5356 record_operand_error_info (opcode, &info);
5357 }
5358
5359 static void
5360 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5361 const char* error, int lower_bound,
5362 int upper_bound)
5363 {
5364 int data[3] = {lower_bound, upper_bound, 0};
5365 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5366 error, data);
5367 }
5368
5369 /* Remove the operand error record for *OPCODE. */
5370 static void ATTRIBUTE_UNUSED
5371 remove_operand_error_record (const aarch64_opcode *opcode)
5372 {
5373 if (opcode_has_operand_error_p (opcode))
5374 {
5375 operand_error_record* record = operand_error_report.head;
5376 gas_assert (record != NULL && operand_error_report.tail != NULL);
5377 operand_error_report.head = record->next;
5378 record->next = free_opnd_error_record_nodes;
5379 free_opnd_error_record_nodes = record;
5380 if (operand_error_report.head == NULL)
5381 {
5382 gas_assert (operand_error_report.tail == record);
5383 operand_error_report.tail = NULL;
5384 }
5385 }
5386 }
5387
5388 /* Given the instruction in *INSTR, return the index of the best matched
5389 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5390
5391 Return -1 if there is no qualifier sequence; return the first match
5392 if there is multiple matches found. */
5393
static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  /* N.B. IDX starts at 0, so the first sequence is returned even if
     nothing in the list matches any operand qualifier.  */
  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes have much fewer patterns in the list; an empty
	 sequence marks the end.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers this sequence matches.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5437
5438 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5439 corresponding operands in *INSTR. */
5440
5441 static inline void
5442 assign_qualifier_sequence (aarch64_inst *instr,
5443 const aarch64_opnd_qualifier_t *qualifiers)
5444 {
5445 int i = 0;
5446 int num_opnds = aarch64_num_of_operands (instr->opcode);
5447 gas_assert (num_opnds);
5448 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5449 instr->operands[i].qualifier = *qualifiers;
5450 }
5451
5452 /* Callback used by aarch64_print_operand to apply STYLE to the
5453 disassembler output created from FMT and ARGS. The STYLER object holds
5454 any required state. Must return a pointer to a string (created from FMT
5455 and ARGS) that will continue to be valid until the complete disassembled
5456 instruction has been printed.
5457
5458 We don't currently add any styling to the output of the disassembler as
5459 used within assembler error messages, and so STYLE is ignored here. A
5460 new string is allocated on the obstack help within STYLER and returned
5461 to the caller. */
5462
static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  ARGS must be consumed twice, so the
     measuring pass works on a va_copy.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5486
5487 /* Print operands for the diagnosis purpose. */
5488
/* Append a printable form of each operand of OPCODE/OPNDS to BUF, for
   diagnostics.  BUF is assumed large enough for all operands plus
   delimiters and comments -- TODO confirm callers size it accordingly.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styling output is accumulated on a local obstack; see
     aarch64_apply_style.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5538
5539 /* Send to stderr a string as information. */
5540
static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix the message with "file:line: " when the current assembly
     location is known.  */
  file = as_where (&line);
  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  fputc ('\n', stderr);
}
5562
5563 /* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
5564 relates to registers or register lists. If so, return a string that
5565 reports the error against "operand %d", otherwise return null. */
5566
5567 static const char *
5568 get_reg_error_message (const aarch64_operand_error *detail)
5569 {
5570 /* Handle the case where we found a register that was expected
5571 to be in a register list outside of a register list. */
5572 if ((detail->data[1].i & detail->data[2].i) != 0
5573 && (detail->data[1].i & SEF_IN_REGLIST) == 0)
5574 return _("missing braces at operand %d");
5575
5576 /* If some opcodes expected a register, and we found a register,
5577 complain about the difference. */
5578 if (detail->data[2].i)
5579 {
5580 unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
5581 ? detail->data[1].i & ~SEF_IN_REGLIST
5582 : detail->data[0].i & ~SEF_DEFAULT_ERROR);
5583 const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
5584 if (!msg)
5585 msg = N_("unexpected register type at operand %d");
5586 return msg;
5587 }
5588
5589 /* Handle the case where we got to the point of trying to parse a
5590 register within a register list, but didn't find a known register. */
5591 if (detail->data[1].i & SEF_IN_REGLIST)
5592 {
5593 unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
5594 const char *msg = get_reg_expected_msg (expected, 0);
5595 if (!msg)
5596 msg = _("invalid register list at operand %d");
5597 return msg;
5598 }
5599
5600 /* Punt if register-related problems weren't the only errors. */
5601 if (detail->data[0].i & SEF_DEFAULT_ERROR)
5602 return NULL;
5603
5604 /* Handle the case where the only acceptable things are registers. */
5605 if (detail->data[1].i == 0)
5606 {
5607 const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
5608 if (!msg)
5609 msg = _("expected a register at operand %d");
5610 return msg;
5611 }
5612
5613 /* Handle the case where the only acceptable things are register lists,
5614 and there was no opening '{'. */
5615 if (detail->data[0].i == 0)
5616 return _("expected '{' at operand %d");
5617
5618 return _("expected a register or register list at operand %d");
5619 }
5620
/* Output one operand error record RECORD, diagnosing assembly line STR.
   Non-fatal errors are issued with as_warn, all others with as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the offending operand, or -1 when no single operand is
     to blame.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  /* Prepared message, if the error collector supplied one.  */
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Prefer a register-specific diagnosis when one applies.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is one, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operands; this is expected to succeed
	     since only the qualifiers mismatched.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail here: the variant is invalid.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0].i and data[1].i are the lower and upper bounds; equal
	 bounds mean a single permitted value.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_INVALID_VG_SIZE:
      if (detail->data[0].i == 0)
	handler (_("unexpected vector group size at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("operand %d must have a vector group size of %d -- `%s'"),
		 idx + 1, detail->data[0].i, str);
      break;

    case AARCH64_OPDE_REG_LIST_LENGTH:
      if (detail->data[0].i == (1 << 1))
	handler (_("expected a single-register list at operand %d -- `%s'"),
		 idx + 1, str);
      /* (x & -x) == x is true iff at most one bit of x is set, i.e. only
	 one list length is acceptable.  */
      else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
	handler (_("expected a list of %d registers at operand %d -- `%s'"),
		 get_log2 (detail->data[0].i), idx + 1, str);
      else
	handler (_("invalid number of registers in the list"
		   " at operand %d -- `%s'"), idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST_STRIDE:
      if (detail->data[0].i == (1 << 1))
	handler (_("the register list must have a stride of %d"
		   " at operand %d -- `%s'"), 1, idx + 1, str);
      else
	handler (_("invalid register stride at operand %d -- `%s'"),
		 idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5855
5856 /* Return true if the presence of error A against an instruction means
5857 that error B should not be reported. This is only used as a first pass,
5858 to pick the kind of error that we should report. */
5859
5860 static bool
5861 better_error_p (operand_error_record *a, operand_error_record *b)
5862 {
5863 /* For errors reported during parsing, prefer errors that relate to
5864 later operands, since that implies that the earlier operands were
5865 syntactically valid.
5866
5867 For example, if we see a register R instead of an immediate in
5868 operand N, we'll report that as a recoverable "immediate operand
5869 required" error. This is because there is often another opcode
5870 entry that accepts a register operand N, and any errors about R
5871 should be reported against the register forms of the instruction.
5872 But if no such register form exists, the recoverable error should
5873 still win over a syntax error against operand N-1.
5874
5875 For these purposes, count an error reported at the end of the
5876 assembly string as equivalent to an error reported against the
5877 final operand. This means that opcode entries that expect more
5878 operands win over "unexpected characters following instruction". */
5879 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5880 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5881 {
5882 int a_index = (a->detail.index < 0
5883 ? aarch64_num_of_operands (a->opcode) - 1
5884 : a->detail.index);
5885 int b_index = (b->detail.index < 0
5886 ? aarch64_num_of_operands (b->opcode) - 1
5887 : b->detail.index);
5888 if (a_index != b_index)
5889 return a_index > b_index;
5890 }
5891 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5892 }
5893
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* First pass: find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Second pass: pick up one of errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the details so the report covers all templates that
		 failed at this operand index.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6029 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int byte;

  /* Emit the four bytes least-significant first.  */
  for (byte = 0; byte < 4; byte++)
    p[byte] = (insn >> (8 * byte)) & 0xff;
}
6040
/* Read a 32-bit AArch64 instruction from BUF, stored little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t value = 0;
  int byte;

  /* Accumulate from the most-significant byte down.  */
  for (byte = 3; byte >= 0; byte--)
    value = (value << 8) | p[byte];
  return value;
}
6050
6051 static void
6052 output_inst (struct aarch64_inst *new_inst)
6053 {
6054 char *to = NULL;
6055
6056 to = frag_more (INSN_SIZE);
6057
6058 frag_now->tc_frag_data.recorded = 1;
6059
6060 put_aarch64_insn (to, inst.base.value);
6061
6062 if (inst.reloc.type != BFD_RELOC_UNUSED)
6063 {
6064 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
6065 INSN_SIZE, &inst.reloc.exp,
6066 inst.reloc.pc_rel,
6067 inst.reloc.type);
6068 DEBUG_TRACE ("Prepared relocation fix up");
6069 /* Don't check the addend value against the instruction size,
6070 that's the job of our code in md_apply_fix(). */
6071 fixp->fx_no_overflow = 1;
6072 if (new_inst != NULL)
6073 fixp->tc_fix_data.inst = new_inst;
6074 if (aarch64_gas_internal_fixup_p ())
6075 {
6076 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
6077 fixp->tc_fix_data.opnd = inst.reloc.opnd;
6078 fixp->fx_addnumber = inst.reloc.flags;
6079 }
6080 }
6081
6082 dwarf2_emit_insn (INSN_SIZE);
6083 }
6084
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry sharing this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry with the same mnemonic, or NULL.  */
  struct templates *next;
};

typedef struct templates templates;
6094
6095 static templates *
6096 lookup_mnemonic (const char *start, int len)
6097 {
6098 templates *templ = NULL;
6099
6100 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6101 return templ;
6102 }
6103
6104 /* Subroutine of md_assemble, responsible for looking up the primary
6105 opcode from the mnemonic the user wrote. BASE points to the beginning
6106 of the mnemonic, DOT points to the first '.' within the mnemonic
6107 (if any) and END points to the end of the mnemonic. */
6108
6109 static templates *
6110 opcode_lookup (char *base, char *dot, char *end)
6111 {
6112 const aarch64_cond *cond;
6113 char condname[16];
6114 int len;
6115
6116 if (dot == end)
6117 return 0;
6118
6119 inst.cond = COND_ALWAYS;
6120
6121 /* Handle a possible condition. */
6122 if (dot)
6123 {
6124 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6125 if (!cond)
6126 return 0;
6127 inst.cond = cond->value;
6128 len = dot - base;
6129 }
6130 else
6131 len = end - base;
6132
6133 if (inst.cond == COND_ALWAYS)
6134 {
6135 /* Look for unaffixed mnemonic. */
6136 return lookup_mnemonic (base, len);
6137 }
6138 else if (len <= 13)
6139 {
6140 /* append ".c" to mnemonic if conditional */
6141 memcpy (condname, base, len);
6142 memcpy (condname + len, ".c", 2);
6143 base = condname;
6144 len += 2;
6145 return lookup_mnemonic (base, len);
6146 }
6147
6148 return NULL;
6149 }
6150
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   The default value encoded in the opcode entry is written into the field
   of *OPERAND appropriate for TYPE.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* SVE pattern with an implicit "MUL #1" multiplier.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate needs no relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier / hint operands index into their option tables.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6249
/* Process the relocation type for move wide instructions.
   Validates the relocation against the instruction (MOVK restrictions,
   32-bit register restrictions) and sets the implied shift amount on
   operand 1.  Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Signed / PREL / TLS-GD / TPREL group relocations make no sense for
     MOVK, which only inserts bits without sign semantics.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to the implied LSL amount.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Shifts of 32 or 48 cannot apply to a 32-bit destination.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6351
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
   The result is selected by the pseudo reloc kind (table row) and the
   transfer size implied by operand 1's qualifier (table column).  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: one per pseudo reloc kind, in the same order as the pseudo
     BFD_RELOC_AARCH64_*LDST*_LO12 codes (see the index calculation at
     the end).  Columns: log2 of the access size, 8 to 128 bits.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* Infer operand 1's qualifier from operand 0 when it is not set.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS rows have no 128-bit column.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6439
6440 /* Check whether a register list REGINFO is valid. The registers must be
6441 numbered in increasing order (modulo 32). They must also have a
6442 consistent stride.
6443
6444 Return true if the list is valid, describing it in LIST if so. */
6445
6446 static bool
6447 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list)
6448 {
6449 uint32_t i, nb_regs, prev_regno, incr;
6450
6451 nb_regs = 1 + (reginfo & 0x3);
6452 reginfo >>= 2;
6453 prev_regno = reginfo & 0x1f;
6454 incr = 1;
6455
6456 list->first_regno = prev_regno;
6457 list->num_regs = nb_regs;
6458
6459 for (i = 1; i < nb_regs; ++i)
6460 {
6461 uint32_t curr_regno, curr_incr;
6462 reginfo >>= 5;
6463 curr_regno = reginfo & 0x1f;
6464 curr_incr = (curr_regno - prev_regno) & 0x1f;
6465 if (curr_incr == 0)
6466 return false;
6467 else if (i == 1)
6468 incr = curr_incr;
6469 else if (curr_incr != incr)
6470 return false;
6471 prev_regno = curr_regno;
6472 }
6473
6474 list->stride = incr;
6475 return true;
6476 }
6477
6478 /* Generic instruction operand parser. This does no encoding and no
6479 semantic validation; it merely squirrels values away in the inst
6480 structure. Returns TRUE or FALSE depending on whether the
6481 specified grammar matched. */
6482
6483 static bool
6484 parse_operands (char *str, const aarch64_opcode *opcode)
6485 {
6486 int i;
6487 char *backtrack_pos = 0;
6488 const enum aarch64_opnd *operands = opcode->operands;
6489 aarch64_reg_type imm_reg_type;
6490
6491 clear_error ();
6492 skip_whitespace (str);
6493
6494 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6495 AARCH64_FEATURE_SVE
6496 | AARCH64_FEATURE_SVE2))
6497 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6498 else
6499 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6500
6501 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6502 {
6503 int64_t val;
6504 const reg_entry *reg;
6505 int comma_skipped_p = 0;
6506 struct vector_type_el vectype;
6507 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6508 aarch64_opnd_info *info = &inst.base.operands[i];
6509 aarch64_reg_type reg_type;
6510
6511 DEBUG_TRACE ("parse operand %d", i);
6512
6513 /* Assign the operand code. */
6514 info->type = operands[i];
6515
6516 if (optional_operand_p (opcode, i))
6517 {
6518 /* Remember where we are in case we need to backtrack. */
6519 gas_assert (!backtrack_pos);
6520 backtrack_pos = str;
6521 }
6522
6523 /* Expect comma between operands; the backtrack mechanism will take
6524 care of cases of omitted optional operand. */
6525 if (i > 0 && ! skip_past_char (&str, ','))
6526 {
6527 set_syntax_error (_("comma expected between operands"));
6528 goto failure;
6529 }
6530 else
6531 comma_skipped_p = 1;
6532
6533 switch (operands[i])
6534 {
6535 case AARCH64_OPND_Rd:
6536 case AARCH64_OPND_Rn:
6537 case AARCH64_OPND_Rm:
6538 case AARCH64_OPND_Rt:
6539 case AARCH64_OPND_Rt2:
6540 case AARCH64_OPND_Rs:
6541 case AARCH64_OPND_Ra:
6542 case AARCH64_OPND_Rt_LS64:
6543 case AARCH64_OPND_Rt_SYS:
6544 case AARCH64_OPND_PAIRREG:
6545 case AARCH64_OPND_SVE_Rm:
6546 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6547
6548 /* In LS64 load/store instructions Rt register number must be even
6549 and <=22. */
6550 if (operands[i] == AARCH64_OPND_Rt_LS64)
6551 {
6552 /* We've already checked if this is valid register.
6553 This will check if register number (Rt) is not undefined for LS64
6554 instructions:
6555 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6556 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6557 {
6558 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6559 goto failure;
6560 }
6561 }
6562 break;
6563
6564 case AARCH64_OPND_Rd_SP:
6565 case AARCH64_OPND_Rn_SP:
6566 case AARCH64_OPND_Rt_SP:
6567 case AARCH64_OPND_SVE_Rn_SP:
6568 case AARCH64_OPND_Rm_SP:
6569 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6570 break;
6571
6572 case AARCH64_OPND_Rm_EXT:
6573 case AARCH64_OPND_Rm_SFT:
6574 po_misc_or_fail (parse_shifter_operand
6575 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6576 ? SHIFTED_ARITH_IMM
6577 : SHIFTED_LOGIC_IMM)));
6578 if (!info->shifter.operator_present)
6579 {
6580 /* Default to LSL if not present. Libopcodes prefers shifter
6581 kind to be explicit. */
6582 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6583 info->shifter.kind = AARCH64_MOD_LSL;
6584 /* For Rm_EXT, libopcodes will carry out further check on whether
6585 or not stack pointer is used in the instruction (Recall that
6586 "the extend operator is not optional unless at least one of
6587 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6588 }
6589 break;
6590
6591 case AARCH64_OPND_Fd:
6592 case AARCH64_OPND_Fn:
6593 case AARCH64_OPND_Fm:
6594 case AARCH64_OPND_Fa:
6595 case AARCH64_OPND_Ft:
6596 case AARCH64_OPND_Ft2:
6597 case AARCH64_OPND_Sd:
6598 case AARCH64_OPND_Sn:
6599 case AARCH64_OPND_Sm:
6600 case AARCH64_OPND_SVE_VZn:
6601 case AARCH64_OPND_SVE_Vd:
6602 case AARCH64_OPND_SVE_Vm:
6603 case AARCH64_OPND_SVE_Vn:
6604 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6605 break;
6606
6607 case AARCH64_OPND_SVE_Pd:
6608 case AARCH64_OPND_SVE_Pg3:
6609 case AARCH64_OPND_SVE_Pg4_5:
6610 case AARCH64_OPND_SVE_Pg4_10:
6611 case AARCH64_OPND_SVE_Pg4_16:
6612 case AARCH64_OPND_SVE_Pm:
6613 case AARCH64_OPND_SVE_Pn:
6614 case AARCH64_OPND_SVE_Pt:
6615 case AARCH64_OPND_SME_Pm:
6616 reg_type = REG_TYPE_P;
6617 goto vector_reg;
6618
6619 case AARCH64_OPND_SVE_Za_5:
6620 case AARCH64_OPND_SVE_Za_16:
6621 case AARCH64_OPND_SVE_Zd:
6622 case AARCH64_OPND_SVE_Zm_5:
6623 case AARCH64_OPND_SVE_Zm_16:
6624 case AARCH64_OPND_SVE_Zn:
6625 case AARCH64_OPND_SVE_Zt:
6626 reg_type = REG_TYPE_Z;
6627 goto vector_reg;
6628
6629 case AARCH64_OPND_SVE_PNd:
6630 case AARCH64_OPND_SVE_PNg4_10:
6631 case AARCH64_OPND_SVE_PNn:
6632 case AARCH64_OPND_SVE_PNt:
6633 reg_type = REG_TYPE_PN;
6634 goto vector_reg;
6635
6636 case AARCH64_OPND_Va:
6637 case AARCH64_OPND_Vd:
6638 case AARCH64_OPND_Vn:
6639 case AARCH64_OPND_Vm:
6640 reg_type = REG_TYPE_V;
6641 vector_reg:
6642 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6643 if (!reg)
6644 goto failure;
6645 if (vectype.defined & NTA_HASINDEX)
6646 goto failure;
6647
6648 info->reg.regno = reg->number;
6649 if ((reg_type == REG_TYPE_P
6650 || reg_type == REG_TYPE_PN
6651 || reg_type == REG_TYPE_Z)
6652 && vectype.type == NT_invtype)
6653 /* Unqualified P and Z registers are allowed in certain
6654 contexts. Rely on F_STRICT qualifier checking to catch
6655 invalid uses. */
6656 info->qualifier = AARCH64_OPND_QLF_NIL;
6657 else
6658 {
6659 info->qualifier = vectype_to_qualifier (&vectype);
6660 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6661 goto failure;
6662 }
6663 break;
6664
6665 case AARCH64_OPND_VdD1:
6666 case AARCH64_OPND_VnD1:
6667 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6668 if (!reg)
6669 goto failure;
6670 if (vectype.type != NT_d || vectype.index != 1)
6671 {
6672 set_fatal_syntax_error
6673 (_("the top half of a 128-bit FP/SIMD register is expected"));
6674 goto failure;
6675 }
6676 info->reg.regno = reg->number;
6677 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6678 here; it is correct for the purpose of encoding/decoding since
6679 only the register number is explicitly encoded in the related
6680 instructions, although this appears a bit hacky. */
6681 info->qualifier = AARCH64_OPND_QLF_S_D;
6682 break;
6683
6684 case AARCH64_OPND_SVE_Zm3_INDEX:
6685 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6686 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6687 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6688 case AARCH64_OPND_SVE_Zm4_INDEX:
6689 case AARCH64_OPND_SVE_Zn_INDEX:
6690 reg_type = REG_TYPE_Z;
6691 goto vector_reg_index;
6692
6693 case AARCH64_OPND_Ed:
6694 case AARCH64_OPND_En:
6695 case AARCH64_OPND_Em:
6696 case AARCH64_OPND_Em16:
6697 case AARCH64_OPND_SM3_IMM2:
6698 reg_type = REG_TYPE_V;
6699 vector_reg_index:
6700 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6701 if (!reg)
6702 goto failure;
6703 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6704 goto failure;
6705
6706 info->reglane.regno = reg->number;
6707 info->reglane.index = vectype.index;
6708 info->qualifier = vectype_to_qualifier (&vectype);
6709 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6710 goto failure;
6711 break;
6712
6713 case AARCH64_OPND_SVE_ZnxN:
6714 case AARCH64_OPND_SVE_ZtxN:
6715 reg_type = REG_TYPE_Z;
6716 goto vector_reg_list;
6717
6718 case AARCH64_OPND_LVn:
6719 case AARCH64_OPND_LVt:
6720 case AARCH64_OPND_LVt_AL:
6721 case AARCH64_OPND_LEt:
6722 reg_type = REG_TYPE_V;
6723 vector_reg_list:
6724 if (reg_type == REG_TYPE_Z
6725 && get_opcode_dependent_value (opcode) == 1
6726 && *str != '{')
6727 {
6728 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6729 if (!reg)
6730 goto failure;
6731 info->reglist.first_regno = reg->number;
6732 info->reglist.num_regs = 1;
6733 info->reglist.stride = 1;
6734 }
6735 else
6736 {
6737 val = parse_vector_reg_list (&str, reg_type, &vectype);
6738 if (val == PARSE_FAIL)
6739 goto failure;
6740
6741 if (! reg_list_valid_p (val, &info->reglist))
6742 {
6743 set_fatal_syntax_error (_("invalid register list"));
6744 goto failure;
6745 }
6746
6747 if (vectype.width != 0 && *str != ',')
6748 {
6749 set_fatal_syntax_error
6750 (_("expected element type rather than vector type"));
6751 goto failure;
6752 }
6753 }
6754 if (operands[i] == AARCH64_OPND_LEt)
6755 {
6756 if (!(vectype.defined & NTA_HASINDEX))
6757 goto failure;
6758 info->reglist.has_index = 1;
6759 info->reglist.index = vectype.index;
6760 }
6761 else
6762 {
6763 if (vectype.defined & NTA_HASINDEX)
6764 goto failure;
6765 if (!(vectype.defined & NTA_HASTYPE))
6766 {
6767 if (reg_type == REG_TYPE_Z)
6768 set_fatal_syntax_error (_("missing type suffix"));
6769 goto failure;
6770 }
6771 }
6772 info->qualifier = vectype_to_qualifier (&vectype);
6773 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6774 goto failure;
6775 break;
6776
6777 case AARCH64_OPND_CRn:
6778 case AARCH64_OPND_CRm:
6779 {
6780 char prefix = *(str++);
6781 if (prefix != 'c' && prefix != 'C')
6782 goto failure;
6783
6784 po_imm_nc_or_fail ();
6785 if (val > 15)
6786 {
6787 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6788 goto failure;
6789 }
6790 info->qualifier = AARCH64_OPND_QLF_CR;
6791 info->imm.value = val;
6792 break;
6793 }
6794
6795 case AARCH64_OPND_SHLL_IMM:
6796 case AARCH64_OPND_IMM_VLSR:
6797 po_imm_or_fail (1, 64);
6798 info->imm.value = val;
6799 break;
6800
6801 case AARCH64_OPND_CCMP_IMM:
6802 case AARCH64_OPND_SIMM5:
6803 case AARCH64_OPND_FBITS:
6804 case AARCH64_OPND_TME_UIMM16:
6805 case AARCH64_OPND_UIMM4:
6806 case AARCH64_OPND_UIMM4_ADDG:
6807 case AARCH64_OPND_UIMM10:
6808 case AARCH64_OPND_UIMM3_OP1:
6809 case AARCH64_OPND_UIMM3_OP2:
6810 case AARCH64_OPND_IMM_VLSL:
6811 case AARCH64_OPND_IMM:
6812 case AARCH64_OPND_IMM_2:
6813 case AARCH64_OPND_WIDTH:
6814 case AARCH64_OPND_SVE_INV_LIMM:
6815 case AARCH64_OPND_SVE_LIMM:
6816 case AARCH64_OPND_SVE_LIMM_MOV:
6817 case AARCH64_OPND_SVE_SHLIMM_PRED:
6818 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6819 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6820 case AARCH64_OPND_SVE_SHRIMM_PRED:
6821 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6822 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6823 case AARCH64_OPND_SVE_SIMM5:
6824 case AARCH64_OPND_SVE_SIMM5B:
6825 case AARCH64_OPND_SVE_SIMM6:
6826 case AARCH64_OPND_SVE_SIMM8:
6827 case AARCH64_OPND_SVE_UIMM3:
6828 case AARCH64_OPND_SVE_UIMM7:
6829 case AARCH64_OPND_SVE_UIMM8:
6830 case AARCH64_OPND_SVE_UIMM8_53:
6831 case AARCH64_OPND_IMM_ROT1:
6832 case AARCH64_OPND_IMM_ROT2:
6833 case AARCH64_OPND_IMM_ROT3:
6834 case AARCH64_OPND_SVE_IMM_ROT1:
6835 case AARCH64_OPND_SVE_IMM_ROT2:
6836 case AARCH64_OPND_SVE_IMM_ROT3:
6837 case AARCH64_OPND_CSSC_SIMM8:
6838 case AARCH64_OPND_CSSC_UIMM8:
6839 po_imm_nc_or_fail ();
6840 info->imm.value = val;
6841 break;
6842
6843 case AARCH64_OPND_SVE_AIMM:
6844 case AARCH64_OPND_SVE_ASIMM:
6845 po_imm_nc_or_fail ();
6846 info->imm.value = val;
6847 skip_whitespace (str);
6848 if (skip_past_comma (&str))
6849 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6850 else
6851 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6852 break;
6853
6854 case AARCH64_OPND_SVE_PATTERN:
6855 po_enum_or_fail (aarch64_sve_pattern_array);
6856 info->imm.value = val;
6857 break;
6858
6859 case AARCH64_OPND_SVE_PATTERN_SCALED:
6860 po_enum_or_fail (aarch64_sve_pattern_array);
6861 info->imm.value = val;
6862 if (skip_past_comma (&str)
6863 && !parse_shift (&str, info, SHIFTED_MUL))
6864 goto failure;
6865 if (!info->shifter.operator_present)
6866 {
6867 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6868 info->shifter.kind = AARCH64_MOD_MUL;
6869 info->shifter.amount = 1;
6870 }
6871 break;
6872
6873 case AARCH64_OPND_SVE_PRFOP:
6874 po_enum_or_fail (aarch64_sve_prfop_array);
6875 info->imm.value = val;
6876 break;
6877
6878 case AARCH64_OPND_UIMM7:
6879 po_imm_or_fail (0, 127);
6880 info->imm.value = val;
6881 break;
6882
6883 case AARCH64_OPND_IDX:
6884 case AARCH64_OPND_MASK:
6885 case AARCH64_OPND_BIT_NUM:
6886 case AARCH64_OPND_IMMR:
6887 case AARCH64_OPND_IMMS:
6888 po_imm_or_fail (0, 63);
6889 info->imm.value = val;
6890 break;
6891
6892 case AARCH64_OPND_IMM0:
6893 po_imm_nc_or_fail ();
6894 if (val != 0)
6895 {
6896 set_fatal_syntax_error (_("immediate zero expected"));
6897 goto failure;
6898 }
6899 info->imm.value = 0;
6900 break;
6901
6902 case AARCH64_OPND_FPIMM0:
6903 {
6904 int qfloat;
6905 bool res1 = false, res2 = false;
6906 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6907 it is probably not worth the effort to support it. */
6908 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6909 imm_reg_type))
6910 && (error_p ()
6911 || !(res2 = parse_constant_immediate (&str, &val,
6912 imm_reg_type))))
6913 goto failure;
6914 if ((res1 && qfloat == 0) || (res2 && val == 0))
6915 {
6916 info->imm.value = 0;
6917 info->imm.is_fp = 1;
6918 break;
6919 }
6920 set_fatal_syntax_error (_("immediate zero expected"));
6921 goto failure;
6922 }
6923
6924 case AARCH64_OPND_IMM_MOV:
6925 {
6926 char *saved = str;
6927 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
6928 || reg_name_p (str, REG_TYPE_V))
6929 goto failure;
6930 str = saved;
6931 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6932 GE_OPT_PREFIX, REJECT_ABSENT));
6933 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6934 later. fix_mov_imm_insn will try to determine a machine
6935 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6936 message if the immediate cannot be moved by a single
6937 instruction. */
6938 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6939 inst.base.operands[i].skip = 1;
6940 }
6941 break;
6942
6943 case AARCH64_OPND_SIMD_IMM:
6944 case AARCH64_OPND_SIMD_IMM_SFT:
6945 if (! parse_big_immediate (&str, &val, imm_reg_type))
6946 goto failure;
6947 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6948 /* addr_off_p */ 0,
6949 /* need_libopcodes_p */ 1,
6950 /* skip_p */ 1);
6951 /* Parse shift.
6952 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6953 shift, we don't check it here; we leave the checking to
6954 the libopcodes (operand_general_constraint_met_p). By
6955 doing this, we achieve better diagnostics. */
6956 if (skip_past_comma (&str)
6957 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6958 goto failure;
6959 if (!info->shifter.operator_present
6960 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6961 {
6962 /* Default to LSL if not present. Libopcodes prefers shifter
6963 kind to be explicit. */
6964 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6965 info->shifter.kind = AARCH64_MOD_LSL;
6966 }
6967 break;
6968
6969 case AARCH64_OPND_FPIMM:
6970 case AARCH64_OPND_SIMD_FPIMM:
6971 case AARCH64_OPND_SVE_FPIMM8:
6972 {
6973 int qfloat;
6974 bool dp_p;
6975
6976 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6977 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6978 || !aarch64_imm_float_p (qfloat))
6979 {
6980 if (!error_p ())
6981 set_fatal_syntax_error (_("invalid floating-point"
6982 " constant"));
6983 goto failure;
6984 }
6985 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6986 inst.base.operands[i].imm.is_fp = 1;
6987 }
6988 break;
6989
6990 case AARCH64_OPND_SVE_I1_HALF_ONE:
6991 case AARCH64_OPND_SVE_I1_HALF_TWO:
6992 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6993 {
6994 int qfloat;
6995 bool dp_p;
6996
6997 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6998 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6999 {
7000 if (!error_p ())
7001 set_fatal_syntax_error (_("invalid floating-point"
7002 " constant"));
7003 goto failure;
7004 }
7005 inst.base.operands[i].imm.value = qfloat;
7006 inst.base.operands[i].imm.is_fp = 1;
7007 }
7008 break;
7009
7010 case AARCH64_OPND_LIMM:
7011 po_misc_or_fail (parse_shifter_operand (&str, info,
7012 SHIFTED_LOGIC_IMM));
7013 if (info->shifter.operator_present)
7014 {
7015 set_fatal_syntax_error
7016 (_("shift not allowed for bitmask immediate"));
7017 goto failure;
7018 }
7019 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7020 /* addr_off_p */ 0,
7021 /* need_libopcodes_p */ 1,
7022 /* skip_p */ 1);
7023 break;
7024
7025 case AARCH64_OPND_AIMM:
7026 if (opcode->op == OP_ADD)
7027 /* ADD may have relocation types. */
7028 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7029 SHIFTED_ARITH_IMM));
7030 else
7031 po_misc_or_fail (parse_shifter_operand (&str, info,
7032 SHIFTED_ARITH_IMM));
7033 switch (inst.reloc.type)
7034 {
7035 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7036 info->shifter.amount = 12;
7037 break;
7038 case BFD_RELOC_UNUSED:
7039 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7040 if (info->shifter.kind != AARCH64_MOD_NONE)
7041 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7042 inst.reloc.pc_rel = 0;
7043 break;
7044 default:
7045 break;
7046 }
7047 info->imm.value = 0;
7048 if (!info->shifter.operator_present)
7049 {
7050 /* Default to LSL if not present. Libopcodes prefers shifter
7051 kind to be explicit. */
7052 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7053 info->shifter.kind = AARCH64_MOD_LSL;
7054 }
7055 break;
7056
7057 case AARCH64_OPND_HALF:
7058 {
7059 /* #<imm16> or relocation. */
7060 int internal_fixup_p;
7061 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7062 if (internal_fixup_p)
7063 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7064 skip_whitespace (str);
7065 if (skip_past_comma (&str))
7066 {
7067 /* {, LSL #<shift>} */
7068 if (! aarch64_gas_internal_fixup_p ())
7069 {
7070 set_fatal_syntax_error (_("can't mix relocation modifier "
7071 "with explicit shift"));
7072 goto failure;
7073 }
7074 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7075 }
7076 else
7077 inst.base.operands[i].shifter.amount = 0;
7078 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7079 inst.base.operands[i].imm.value = 0;
7080 if (! process_movw_reloc_info ())
7081 goto failure;
7082 }
7083 break;
7084
7085 case AARCH64_OPND_EXCEPTION:
7086 case AARCH64_OPND_UNDEFINED:
7087 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7088 imm_reg_type));
7089 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7090 /* addr_off_p */ 0,
7091 /* need_libopcodes_p */ 0,
7092 /* skip_p */ 1);
7093 break;
7094
7095 case AARCH64_OPND_NZCV:
7096 {
7097 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7098 if (nzcv != NULL)
7099 {
7100 str += 4;
7101 info->imm.value = nzcv->value;
7102 break;
7103 }
7104 po_imm_or_fail (0, 15);
7105 info->imm.value = val;
7106 }
7107 break;
7108
7109 case AARCH64_OPND_COND:
7110 case AARCH64_OPND_COND1:
7111 {
7112 char *start = str;
7113 do
7114 str++;
7115 while (ISALPHA (*str));
7116 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7117 if (info->cond == NULL)
7118 {
7119 set_syntax_error (_("invalid condition"));
7120 goto failure;
7121 }
7122 else if (operands[i] == AARCH64_OPND_COND1
7123 && (info->cond->value & 0xe) == 0xe)
7124 {
7125 /* Do not allow AL or NV. */
7126 set_default_error ();
7127 goto failure;
7128 }
7129 }
7130 break;
7131
7132 case AARCH64_OPND_ADDR_ADRP:
7133 po_misc_or_fail (parse_adrp (&str));
7134 /* Clear the value as operand needs to be relocated. */
7135 info->imm.value = 0;
7136 break;
7137
7138 case AARCH64_OPND_ADDR_PCREL14:
7139 case AARCH64_OPND_ADDR_PCREL19:
7140 case AARCH64_OPND_ADDR_PCREL21:
7141 case AARCH64_OPND_ADDR_PCREL26:
7142 po_misc_or_fail (parse_address (&str, info));
7143 if (!info->addr.pcrel)
7144 {
7145 set_syntax_error (_("invalid pc-relative address"));
7146 goto failure;
7147 }
7148 if (inst.gen_lit_pool
7149 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7150 {
7151 /* Only permit "=value" in the literal load instructions.
7152 The literal will be generated by programmer_friendly_fixup. */
7153 set_syntax_error (_("invalid use of \"=immediate\""));
7154 goto failure;
7155 }
7156 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7157 {
7158 set_syntax_error (_("unrecognized relocation suffix"));
7159 goto failure;
7160 }
7161 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7162 {
7163 info->imm.value = inst.reloc.exp.X_add_number;
7164 inst.reloc.type = BFD_RELOC_UNUSED;
7165 }
7166 else
7167 {
7168 info->imm.value = 0;
7169 if (inst.reloc.type == BFD_RELOC_UNUSED)
7170 switch (opcode->iclass)
7171 {
7172 case compbranch:
7173 case condbranch:
7174 /* e.g. CBZ or B.COND */
7175 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7176 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7177 break;
7178 case testbranch:
7179 /* e.g. TBZ */
7180 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7181 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7182 break;
7183 case branch_imm:
7184 /* e.g. B or BL */
7185 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7186 inst.reloc.type =
7187 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7188 : BFD_RELOC_AARCH64_JUMP26;
7189 break;
7190 case loadlit:
7191 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7192 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7193 break;
7194 case pcreladdr:
7195 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7196 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7197 break;
7198 default:
7199 gas_assert (0);
7200 abort ();
7201 }
7202 inst.reloc.pc_rel = 1;
7203 }
7204 break;
7205
7206 case AARCH64_OPND_ADDR_SIMPLE:
7207 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7208 {
7209 /* [<Xn|SP>{, #<simm>}] */
7210 char *start = str;
7211 /* First use the normal address-parsing routines, to get
7212 the usual syntax errors. */
7213 po_misc_or_fail (parse_address (&str, info));
7214 if (info->addr.pcrel || info->addr.offset.is_reg
7215 || !info->addr.preind || info->addr.postind
7216 || info->addr.writeback)
7217 {
7218 set_syntax_error (_("invalid addressing mode"));
7219 goto failure;
7220 }
7221
7222 /* Then retry, matching the specific syntax of these addresses. */
7223 str = start;
7224 po_char_or_fail ('[');
7225 po_reg_or_fail (REG_TYPE_R64_SP);
7226 /* Accept optional ", #0". */
7227 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7228 && skip_past_char (&str, ','))
7229 {
7230 skip_past_char (&str, '#');
7231 if (! skip_past_char (&str, '0'))
7232 {
7233 set_fatal_syntax_error
7234 (_("the optional immediate offset can only be 0"));
7235 goto failure;
7236 }
7237 }
7238 po_char_or_fail (']');
7239 break;
7240 }
7241
7242 case AARCH64_OPND_ADDR_REGOFF:
7243 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7244 po_misc_or_fail (parse_address (&str, info));
7245 regoff_addr:
7246 if (info->addr.pcrel || !info->addr.offset.is_reg
7247 || !info->addr.preind || info->addr.postind
7248 || info->addr.writeback)
7249 {
7250 set_syntax_error (_("invalid addressing mode"));
7251 goto failure;
7252 }
7253 if (!info->shifter.operator_present)
7254 {
7255 /* Default to LSL if not present. Libopcodes prefers shifter
7256 kind to be explicit. */
7257 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7258 info->shifter.kind = AARCH64_MOD_LSL;
7259 }
7260 /* Qualifier to be deduced by libopcodes. */
7261 break;
7262
7263 case AARCH64_OPND_ADDR_SIMM7:
7264 po_misc_or_fail (parse_address (&str, info));
7265 if (info->addr.pcrel || info->addr.offset.is_reg
7266 || (!info->addr.preind && !info->addr.postind))
7267 {
7268 set_syntax_error (_("invalid addressing mode"));
7269 goto failure;
7270 }
7271 if (inst.reloc.type != BFD_RELOC_UNUSED)
7272 {
7273 set_syntax_error (_("relocation not allowed"));
7274 goto failure;
7275 }
7276 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7277 /* addr_off_p */ 1,
7278 /* need_libopcodes_p */ 1,
7279 /* skip_p */ 0);
7280 break;
7281
7282 case AARCH64_OPND_ADDR_SIMM9:
7283 case AARCH64_OPND_ADDR_SIMM9_2:
7284 case AARCH64_OPND_ADDR_SIMM11:
7285 case AARCH64_OPND_ADDR_SIMM13:
7286 po_misc_or_fail (parse_address (&str, info));
7287 if (info->addr.pcrel || info->addr.offset.is_reg
7288 || (!info->addr.preind && !info->addr.postind)
7289 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7290 && info->addr.writeback))
7291 {
7292 set_syntax_error (_("invalid addressing mode"));
7293 goto failure;
7294 }
7295 if (inst.reloc.type != BFD_RELOC_UNUSED)
7296 {
7297 set_syntax_error (_("relocation not allowed"));
7298 goto failure;
7299 }
7300 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7301 /* addr_off_p */ 1,
7302 /* need_libopcodes_p */ 1,
7303 /* skip_p */ 0);
7304 break;
7305
7306 case AARCH64_OPND_ADDR_SIMM10:
7307 case AARCH64_OPND_ADDR_OFFSET:
7308 po_misc_or_fail (parse_address (&str, info));
7309 if (info->addr.pcrel || info->addr.offset.is_reg
7310 || !info->addr.preind || info->addr.postind)
7311 {
7312 set_syntax_error (_("invalid addressing mode"));
7313 goto failure;
7314 }
7315 if (inst.reloc.type != BFD_RELOC_UNUSED)
7316 {
7317 set_syntax_error (_("relocation not allowed"));
7318 goto failure;
7319 }
7320 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7321 /* addr_off_p */ 1,
7322 /* need_libopcodes_p */ 1,
7323 /* skip_p */ 0);
7324 break;
7325
7326 case AARCH64_OPND_ADDR_UIMM12:
7327 po_misc_or_fail (parse_address (&str, info));
7328 if (info->addr.pcrel || info->addr.offset.is_reg
7329 || !info->addr.preind || info->addr.writeback)
7330 {
7331 set_syntax_error (_("invalid addressing mode"));
7332 goto failure;
7333 }
7334 if (inst.reloc.type == BFD_RELOC_UNUSED)
7335 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7336 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7337 || (inst.reloc.type
7338 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7339 || (inst.reloc.type
7340 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7341 || (inst.reloc.type
7342 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7343 || (inst.reloc.type
7344 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7345 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7346 /* Leave qualifier to be determined by libopcodes. */
7347 break;
7348
7349 case AARCH64_OPND_SIMD_ADDR_POST:
7350 /* [<Xn|SP>], <Xm|#<amount>> */
7351 po_misc_or_fail (parse_address (&str, info));
7352 if (!info->addr.postind || !info->addr.writeback)
7353 {
7354 set_syntax_error (_("invalid addressing mode"));
7355 goto failure;
7356 }
7357 if (!info->addr.offset.is_reg)
7358 {
7359 if (inst.reloc.exp.X_op == O_constant)
7360 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7361 else
7362 {
7363 set_fatal_syntax_error
7364 (_("writeback value must be an immediate constant"));
7365 goto failure;
7366 }
7367 }
7368 /* No qualifier. */
7369 break;
7370
7371 case AARCH64_OPND_SME_SM_ZA:
7372 /* { SM | ZA } */
7373 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7374 {
7375 set_syntax_error (_("unknown or missing PSTATE field name"));
7376 goto failure;
7377 }
7378 info->reg.regno = val;
7379 break;
7380
7381 case AARCH64_OPND_SME_PnT_Wm_imm:
7382 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7383 &info->indexed_za, &qualifier, 0))
7384 goto failure;
7385 info->qualifier = qualifier;
7386 break;
7387
7388 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7389 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7390 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7391 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7392 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7393 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7394 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7395 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7396 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7397 case AARCH64_OPND_SVE_ADDR_RI_U6:
7398 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7399 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7400 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7401 /* [X<n>{, #imm, MUL VL}]
7402 [X<n>{, #imm}]
7403 but recognizing SVE registers. */
7404 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7405 &offset_qualifier));
7406 if (base_qualifier != AARCH64_OPND_QLF_X)
7407 {
7408 set_syntax_error (_("invalid addressing mode"));
7409 goto failure;
7410 }
7411 sve_regimm:
7412 if (info->addr.pcrel || info->addr.offset.is_reg
7413 || !info->addr.preind || info->addr.writeback)
7414 {
7415 set_syntax_error (_("invalid addressing mode"));
7416 goto failure;
7417 }
7418 if (inst.reloc.type != BFD_RELOC_UNUSED
7419 || inst.reloc.exp.X_op != O_constant)
7420 {
7421 /* Make sure this has priority over
7422 "invalid addressing mode". */
7423 set_fatal_syntax_error (_("constant offset required"));
7424 goto failure;
7425 }
7426 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7427 break;
7428
7429 case AARCH64_OPND_SVE_ADDR_R:
7430 /* [<Xn|SP>{, <R><m>}]
7431 but recognizing SVE registers. */
7432 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7433 &offset_qualifier));
7434 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7435 {
7436 offset_qualifier = AARCH64_OPND_QLF_X;
7437 info->addr.offset.is_reg = 1;
7438 info->addr.offset.regno = 31;
7439 }
7440 else if (base_qualifier != AARCH64_OPND_QLF_X
7441 || offset_qualifier != AARCH64_OPND_QLF_X)
7442 {
7443 set_syntax_error (_("invalid addressing mode"));
7444 goto failure;
7445 }
7446 goto regoff_addr;
7447
7448 case AARCH64_OPND_SVE_ADDR_RR:
7449 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7450 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7451 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7452 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7453 case AARCH64_OPND_SVE_ADDR_RX:
7454 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7455 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7456 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7457 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7458 but recognizing SVE registers. */
7459 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7460 &offset_qualifier));
7461 if (base_qualifier != AARCH64_OPND_QLF_X
7462 || offset_qualifier != AARCH64_OPND_QLF_X)
7463 {
7464 set_syntax_error (_("invalid addressing mode"));
7465 goto failure;
7466 }
7467 goto regoff_addr;
7468
7469 case AARCH64_OPND_SVE_ADDR_RZ:
7470 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7471 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7472 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7473 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7474 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7475 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7476 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7477 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7478 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7479 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7480 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7481 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7482 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7483 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7484 &offset_qualifier));
7485 if (base_qualifier != AARCH64_OPND_QLF_X
7486 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7487 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7488 {
7489 set_syntax_error (_("invalid addressing mode"));
7490 goto failure;
7491 }
7492 info->qualifier = offset_qualifier;
7493 goto regoff_addr;
7494
7495 case AARCH64_OPND_SVE_ADDR_ZX:
7496 /* [Zn.<T>{, <Xm>}]. */
7497 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7498 &offset_qualifier));
7499 /* Things to check:
7500 base_qualifier either S_S or S_D
7501 offset_qualifier must be X
7502 */
7503 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7504 && base_qualifier != AARCH64_OPND_QLF_S_D)
7505 || offset_qualifier != AARCH64_OPND_QLF_X)
7506 {
7507 set_syntax_error (_("invalid addressing mode"));
7508 goto failure;
7509 }
7510 info->qualifier = base_qualifier;
7511 if (!info->addr.offset.is_reg || info->addr.pcrel
7512 || !info->addr.preind || info->addr.writeback
7513 || info->shifter.operator_present != 0)
7514 {
7515 set_syntax_error (_("invalid addressing mode"));
7516 goto failure;
7517 }
7518 info->shifter.kind = AARCH64_MOD_LSL;
7519 break;
7520
7521
7522 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7523 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7524 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7525 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7526 /* [Z<n>.<T>{, #imm}] */
7527 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7528 &offset_qualifier));
7529 if (base_qualifier != AARCH64_OPND_QLF_S_S
7530 && base_qualifier != AARCH64_OPND_QLF_S_D)
7531 {
7532 set_syntax_error (_("invalid addressing mode"));
7533 goto failure;
7534 }
7535 info->qualifier = base_qualifier;
7536 goto sve_regimm;
7537
7538 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7539 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7540 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7541 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7542 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7543
7544 We don't reject:
7545
7546 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7547
7548 here since we get better error messages by leaving it to
7549 the qualifier checking routines. */
7550 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7551 &offset_qualifier));
7552 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7553 && base_qualifier != AARCH64_OPND_QLF_S_D)
7554 || offset_qualifier != base_qualifier)
7555 {
7556 set_syntax_error (_("invalid addressing mode"));
7557 goto failure;
7558 }
7559 info->qualifier = base_qualifier;
7560 goto regoff_addr;
7561
7562 case AARCH64_OPND_SYSREG:
7563 {
7564 uint32_t sysreg_flags;
7565 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7566 &sysreg_flags)) == PARSE_FAIL)
7567 {
7568 set_syntax_error (_("unknown or missing system register name"));
7569 goto failure;
7570 }
7571 inst.base.operands[i].sysreg.value = val;
7572 inst.base.operands[i].sysreg.flags = sysreg_flags;
7573 break;
7574 }
7575
7576 case AARCH64_OPND_PSTATEFIELD:
7577 {
7578 uint32_t sysreg_flags;
7579 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7580 &sysreg_flags)) == PARSE_FAIL)
7581 {
7582 set_syntax_error (_("unknown or missing PSTATE field name"));
7583 goto failure;
7584 }
7585 inst.base.operands[i].pstatefield = val;
7586 inst.base.operands[i].sysreg.flags = sysreg_flags;
7587 break;
7588 }
7589
7590 case AARCH64_OPND_SYSREG_IC:
7591 inst.base.operands[i].sysins_op =
7592 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7593 goto sys_reg_ins;
7594
7595 case AARCH64_OPND_SYSREG_DC:
7596 inst.base.operands[i].sysins_op =
7597 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7598 goto sys_reg_ins;
7599
7600 case AARCH64_OPND_SYSREG_AT:
7601 inst.base.operands[i].sysins_op =
7602 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7603 goto sys_reg_ins;
7604
7605 case AARCH64_OPND_SYSREG_SR:
7606 inst.base.operands[i].sysins_op =
7607 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7608 goto sys_reg_ins;
7609
7610 case AARCH64_OPND_SYSREG_TLBI:
7611 inst.base.operands[i].sysins_op =
7612 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7613 sys_reg_ins:
7614 if (inst.base.operands[i].sysins_op == NULL)
7615 {
7616 set_fatal_syntax_error ( _("unknown or missing operation name"));
7617 goto failure;
7618 }
7619 break;
7620
7621 case AARCH64_OPND_BARRIER:
7622 case AARCH64_OPND_BARRIER_ISB:
7623 val = parse_barrier (&str);
7624 if (val != PARSE_FAIL
7625 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7626 {
7627 /* ISB only accepts options name 'sy'. */
7628 set_syntax_error
7629 (_("the specified option is not accepted in ISB"));
7630 /* Turn off backtrack as this optional operand is present. */
7631 backtrack_pos = 0;
7632 goto failure;
7633 }
7634 if (val != PARSE_FAIL
7635 && operands[i] == AARCH64_OPND_BARRIER)
7636 {
7637 /* Regular barriers accept options CRm (C0-C15).
7638 DSB nXS barrier variant accepts values > 15. */
7639 if (val < 0 || val > 15)
7640 {
7641 set_syntax_error (_("the specified option is not accepted in DSB"));
7642 goto failure;
7643 }
7644 }
7645 /* This is an extension to accept a 0..15 immediate. */
7646 if (val == PARSE_FAIL)
7647 po_imm_or_fail (0, 15);
7648 info->barrier = aarch64_barrier_options + val;
7649 break;
7650
7651 case AARCH64_OPND_BARRIER_DSB_NXS:
7652 val = parse_barrier (&str);
7653 if (val != PARSE_FAIL)
7654 {
7655 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7656 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7657 {
7658 set_syntax_error (_("the specified option is not accepted in DSB"));
7659 /* Turn off backtrack as this optional operand is present. */
7660 backtrack_pos = 0;
7661 goto failure;
7662 }
7663 }
7664 else
7665 {
7666 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7667 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7668 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7669 goto failure;
7670 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7671 {
7672 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7673 goto failure;
7674 }
7675 }
7676 /* Option index is encoded as 2-bit value in val<3:2>. */
7677 val = (val >> 2) - 4;
7678 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7679 break;
7680
7681 case AARCH64_OPND_PRFOP:
7682 val = parse_pldop (&str);
7683 /* This is an extension to accept a 0..31 immediate. */
7684 if (val == PARSE_FAIL)
7685 po_imm_or_fail (0, 31);
7686 inst.base.operands[i].prfop = aarch64_prfops + val;
7687 break;
7688
7689 case AARCH64_OPND_BARRIER_PSB:
7690 val = parse_barrier_psb (&str, &(info->hint_option));
7691 if (val == PARSE_FAIL)
7692 goto failure;
7693 break;
7694
7695 case AARCH64_OPND_BTI_TARGET:
7696 val = parse_bti_operand (&str, &(info->hint_option));
7697 if (val == PARSE_FAIL)
7698 goto failure;
7699 break;
7700
7701 case AARCH64_OPND_SME_ZAda_2b:
7702 case AARCH64_OPND_SME_ZAda_3b:
7703 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7704 if (!reg)
7705 goto failure;
7706 info->reg.regno = reg->number;
7707 info->qualifier = qualifier;
7708 break;
7709
7710 case AARCH64_OPND_SME_ZA_HV_idx_src:
7711 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7712 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7713 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7714 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7715 &info->indexed_za,
7716 &qualifier)
7717 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7718 &info->indexed_za, &qualifier, 0))
7719 goto failure;
7720 info->qualifier = qualifier;
7721 break;
7722
7723 case AARCH64_OPND_SME_list_of_64bit_tiles:
7724 val = parse_sme_list_of_64bit_tiles (&str);
7725 if (val == PARSE_FAIL)
7726 goto failure;
7727 info->imm.value = val;
7728 break;
7729
7730 case AARCH64_OPND_SME_ZA_array_off4:
7731 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7732 &info->indexed_za, &qualifier, 0))
7733 goto failure;
7734 info->qualifier = qualifier;
7735 break;
7736
7737 case AARCH64_OPND_MOPS_ADDR_Rd:
7738 case AARCH64_OPND_MOPS_ADDR_Rs:
7739 po_char_or_fail ('[');
7740 if (!parse_x0_to_x30 (&str, info))
7741 goto failure;
7742 po_char_or_fail (']');
7743 po_char_or_fail ('!');
7744 break;
7745
7746 case AARCH64_OPND_MOPS_WB_Rn:
7747 if (!parse_x0_to_x30 (&str, info))
7748 goto failure;
7749 po_char_or_fail ('!');
7750 break;
7751
7752 default:
7753 as_fatal (_("unhandled operand code %d"), operands[i]);
7754 }
7755
7756 /* If we get here, this operand was successfully parsed. */
7757 inst.base.operands[i].present = 1;
7758 continue;
7759
7760 failure:
7761 /* The parse routine should already have set the error, but in case
7762 not, set a default one here. */
7763 if (! error_p ())
7764 set_default_error ();
7765
7766 if (! backtrack_pos)
7767 goto parse_operands_return;
7768
7769 {
7770 /* We reach here because this operand is marked as optional, and
7771 either no operand was supplied or the operand was supplied but it
7772 was syntactically incorrect. In the latter case we report an
7773 error. In the former case we perform a few more checks before
7774 dropping through to the code to insert the default operand. */
7775
7776 char *tmp = backtrack_pos;
7777 char endchar = END_OF_INSN;
7778
7779 if (i != (aarch64_num_of_operands (opcode) - 1))
7780 endchar = ',';
7781 skip_past_char (&tmp, ',');
7782
7783 if (*tmp != endchar)
7784 /* The user has supplied an operand in the wrong format. */
7785 goto parse_operands_return;
7786
7787 /* Make sure there is not a comma before the optional operand.
7788 For example the fifth operand of 'sys' is optional:
7789
7790 sys #0,c0,c0,#0, <--- wrong
7791 sys #0,c0,c0,#0 <--- correct. */
7792 if (comma_skipped_p && i && endchar == END_OF_INSN)
7793 {
7794 set_fatal_syntax_error
7795 (_("unexpected comma before the omitted optional operand"));
7796 goto parse_operands_return;
7797 }
7798 }
7799
7800 /* Reaching here means we are dealing with an optional operand that is
7801 omitted from the assembly line. */
7802 gas_assert (optional_operand_p (opcode, i));
7803 info->present = 0;
7804 process_omitted_operand (operands[i], opcode, i, info);
7805
7806 /* Try again, skipping the optional operand at backtrack_pos. */
7807 str = backtrack_pos;
7808 backtrack_pos = 0;
7809
7810 /* Clear any error record after the omitted optional operand has been
7811 successfully handled. */
7812 clear_error ();
7813 }
7814
7815 /* Check if we have parsed all the operands. */
7816 if (*str != '\0' && ! error_p ())
7817 {
7818 /* Set I to the index of the last present operand; this is
7819 for the purpose of diagnostics. */
7820 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7821 ;
7822 set_fatal_syntax_error
7823 (_("unexpected characters following instruction"));
7824 }
7825
7826 parse_operands_return:
7827
7828 if (error_p ())
7829 {
7830 inst.parsing_error.index = i;
7831 DEBUG_TRACE ("parsing FAIL: %s - %s",
7832 operand_mismatch_kind_names[inst.parsing_error.kind],
7833 inst.parsing_error.error);
7834 /* Record the operand error properly; this is useful when there
7835 are multiple instruction templates for a mnemonic name, so that
7836 later on, we can select the error that most closely describes
7837 the problem. */
7838 record_operand_error_info (opcode, &inst.parsing_error);
7839 return false;
7840 }
7841 else
7842 {
7843 DEBUG_TRACE ("parsing SUCCESS");
7844 return true;
7845 }
7846 }
7847
/* Apply fix-ups that provide programmer-friendly input forms while
   keeping libopcodes happy, i.e. libopcodes only accepts the
   preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */
7852
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; only a few classes need any
     source-level massaging before encoding.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register implies a bit number below 32; anything larger
	     cannot be expressed in the 32-bit view of the register.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* The operand is always encoded as an X register; promote the
	     accepted Wn spelling to the canonical Xn form.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal even though the destination is
	     an X register, so force a 4-byte pool entry.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constants, bignums and symbols can live in a literal
	     pool; reject anything else up front.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7955
7956 /* Check for loads and stores that will cause unpredictable behavior. */
7957
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operand 0 is the transfer register, operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair forms, operands 0/1 are the transfer registers and
	 operand 2 is the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 distinguishes loads from stores in these encodings.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set means a load form.  */
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 set means a pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /*  Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /*  Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /*  Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
8051
8052 static void
8053 force_automatic_sequence_close (void)
8054 {
8055 struct aarch64_segment_info_type *tc_seg_info;
8056
8057 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8058 if (tc_seg_info->insn_sequence.instr)
8059 {
8060 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8061 _("previous `%s' sequence has not been closed"),
8062 tc_seg_info->insn_sequence.instr->opcode->name);
8063 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8064 }
8065 }
8066
8067 /* A wrapper function to interface with libopcodes on encoding and
8068 record the error message if there is any.
8069
8070 Return TRUE on success; otherwise return FALSE. */
8071
8072 static bool
8073 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8074 aarch64_insn *code)
8075 {
8076 aarch64_operand_error error_info;
8077 memset (&error_info, '\0', sizeof (error_info));
8078 error_info.kind = AARCH64_OPDE_NIL;
8079 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8080 && !error_info.non_fatal)
8081 return true;
8082
8083 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8084 record_operand_error_info (opcode, &error_info);
8085 return error_info.non_fatal;
8086 }
8087
8088 #ifdef DEBUG_AARCH64
8089 static inline void
8090 dump_opcode_operands (const aarch64_opcode *opcode)
8091 {
8092 int i = 0;
8093 while (opcode->operands[i] != AARCH64_OPND_NIL)
8094 {
8095 aarch64_verbose ("\t\t opnd%d: %s", i,
8096 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
8097 ? aarch64_get_operand_name (opcode->operands[i])
8098 : aarch64_get_operand_desc (opcode->operands[i]));
8099 ++i;
8100 }
8101 }
8102 #endif /* DEBUG_AARCH64 */
8103
8104 /* This is the guts of the machine-dependent assembler. STR points to a
8105 machine dependent instruction. This function is supposed to emit
8106 the frags/bytes it assembles to. */
8107
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' so that condition
     suffixes (e.g. "b.eq") can be split off later.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  /* An empty mnemonic is an error.  */
  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* NOTE(review): presumably handles "alias .req reg" style statements
     entirely here when the mnemonic has no dot -- confirm against
     create_register_alias.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* opcode_lookup filled in inst.cond; preserve it across the reset of
     the global instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* A template matches only when parsing, programmer-friendly
	 fix-ups and encoding all succeed.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset the per-instruction state before
	 trying the next one with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8258
8259 /* Various frobbings of labels and their addresses. */
8260
void
aarch64_start_line_hook (void)
{
  /* Forget any label from the previous line; md_assemble only adjusts
     a label seen on the current line.  */
  last_label_seen = NULL;
}
8266
void
aarch64_frob_label (symbolS * sym)
{
  /* Record SYM so md_assemble can re-anchor it to the (possibly
     aligned) instruction address, and emit DWARF line info for it.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8274
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8281
8282 int
8283 aarch64_data_in_code (void)
8284 {
8285 if (startswith (input_line_pointer + 1, "data:"))
8286 {
8287 *input_line_pointer = '/';
8288 input_line_pointer += 5;
8289 *input_line_pointer = 0;
8290 return 1;
8291 }
8292
8293 return 0;
8294 }
8295
/* Canonicalize NAME by stripping a trailing "/data" suffix, if present.
   Returns NAME (modified in place).  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  int len = strlen (name);

  /* Only strip when something precedes the suffix.  */
  if (len > 5 && streq (name + len - 5, "/data"))
    name[len - 5] = '\0';

  return name;
}
8306 \f
8307 /* Table of all register names defined by default. The user can
8308 define additional names with .req. Note that all register names
8309 should appear in both upper and lowercase variants. Some registers
8310 also have mixed-case names. */
8311
8312 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8313 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8314 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8315 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8316 #define REGSET16(p,t) \
8317 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8318 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8319 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8320 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8321 #define REGSET16S(p,s,t) \
8322 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8323 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8324 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8325 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8326 #define REGSET31(p,t) \
8327 REGSET16(p, t), \
8328 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8329 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8330 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8331 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8332 #define REGSET(p,t) \
8333 REGSET31(p,t), REGNUM(p,31,t)
8334
8335 /* These go into aarch64_reg_hsh hash-table. */
8336 static const reg_entry reg_names[] = {
8337 /* Integer registers. */
8338 REGSET31 (x, R_64), REGSET31 (X, R_64),
8339 REGSET31 (w, R_32), REGSET31 (W, R_32),
8340
8341 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8342 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8343 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8344 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8345 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8346 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8347
8348 REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
8349 REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),
8350
8351 /* Floating-point single precision registers. */
8352 REGSET (s, FP_S), REGSET (S, FP_S),
8353
8354 /* Floating-point double precision registers. */
8355 REGSET (d, FP_D), REGSET (D, FP_D),
8356
8357 /* Floating-point half precision registers. */
8358 REGSET (h, FP_H), REGSET (H, FP_H),
8359
8360 /* Floating-point byte precision registers. */
8361 REGSET (b, FP_B), REGSET (B, FP_B),
8362
8363 /* Floating-point quad precision registers. */
8364 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8365
8366 /* FP/SIMD registers. */
8367 REGSET (v, V), REGSET (V, V),
8368
8369 /* SVE vector registers. */
8370 REGSET (z, Z), REGSET (Z, Z),
8371
8372 /* SVE predicate(-as-mask) registers. */
8373 REGSET16 (p, P), REGSET16 (P, P),
8374
8375 /* SVE predicate-as-counter registers. */
8376 REGSET16 (pn, PN), REGSET16 (PN, PN),
8377
8378 /* SME ZA. We model this as a register because it acts syntactically
8379 like ZA0H, supporting qualifier suffixes and indexing. */
8380 REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),
8381
8382 /* SME ZA tile registers. */
8383 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8384
8385 /* SME ZA tile registers (horizontal slice). */
8386 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8387
8388 /* SME ZA tile registers (vertical slice). */
8389 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8390 };
8391
8392 #undef REGDEF
8393 #undef REGDEF_ALIAS
8394 #undef REGNUM
8395 #undef REGSET16
8396 #undef REGSET31
8397 #undef REGSET
8398
/* Build the 16-entry table of NZCV flag-set names.  Each single-letter
   macro is 1 for an uppercase (set) flag and 0 for lowercase (clear);
   B packs the four flags into a 4-bit value, N in bit 3 down to V in
   bit 0.  All macros are #undef'd immediately after the table.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8436 \f
8437 /* MD interface: bits in the object file. */
8438
8439 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8440 for use in the a.out file, and stores them in the array pointed to by buf.
8441 This knows about the endian-ness of the target machine and does
8442 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8443 2 (short) and 4 (long) Floating numbers are put out as a series of
8444 LITTLENUMS (shorts, here at least). */
8445
8446 void
8447 md_number_to_chars (char *buf, valueT val, int n)
8448 {
8449 if (target_big_endian)
8450 number_to_chars_bigendian (buf, val, n);
8451 else
8452 number_to_chars_littleendian (buf, val, n);
8453 }
8454
8455 /* MD interface: Sections. */
8456
8457 /* Estimate the size of a frag before relaxing. Assume everything fits in
8458 4 bytes. */
8459
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is 4 bytes, so no real relaxation is
     needed; record and return the fixed size.  */
  fragp->fr_var = 4;
  return 4;
}
8466
8467 /* Round up a section size to the appropriate boundary. */
8468
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding: section sizes are kept exactly as computed.  */
  return size;
}
8474
8475 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8476 of an rs_align_code fragment.
8477
8478 Here we fill the frag with the appropriate info for padding the
8479 output stream. The resulting frag will consist of a fixed (fr_fix)
8480 and of a repeating (fr_var) part.
8481
8482 The fixed content is always emitted before the repeating content and
8483 these two parts are used as follows in constructing the output:
8484 - the fixed part will be used to align to a valid instruction word
8485 boundary, in case that we start at a misaligned address; as no
8486 executable instruction can live at the misaligned location, we
8487 simply fill with zeros;
8488 - the variable part will be used to cover the remaining padding and
8489 we fill using the AArch64 NOP instruction.
8490
8491 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8492 enough storage space for up to 3 bytes for padding the back to a valid
8493 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8494
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag's address.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach 4-byte instruction alignment; these become
     the fixed part of the frag and are zero-filled.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern for the rest of the
     padding.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8532
8533 /* Perform target specific initialisation of a frag.
8534 Note - despite the name this initialisation is not done when the frag
8535 is created, but only when its type is assigned. A frag can be created
8536 and used a long time before its type is set, so beware of assuming that
8537 this initialisation is performed first. */
8538
8539 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Non-ELF targets need no per-frag bookkeeping here.  */
}
8545
8546 #else /* OBJ_ELF is defined. */
8547 void
8548 aarch64_init_frag (fragS * fragP, int max_chars)
8549 {
8550 /* Record a mapping symbol for alignment frags. We will delete this
8551 later if the alignment ends up empty. */
8552 if (!fragP->tc_frag_data.recorded)
8553 fragP->tc_frag_data.recorded = 1;
8554
8555 /* PR 21809: Do not set a mapping state for debug sections
8556 - it just confuses other tools. */
8557 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8558 return;
8559
8560 switch (fragP->fr_type)
8561 {
8562 case rs_align_test:
8563 case rs_fill:
8564 mapping_state_2 (MAP_DATA, max_chars);
8565 break;
8566 case rs_align:
8567 /* PR 20364: We can get alignment frags in code sections,
8568 so do not just assume that we should use the MAP_DATA state. */
8569 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8570 break;
8571 case rs_align_code:
8572 mapping_state_2 (MAP_INSN, max_chars);
8573 break;
8574 default:
8575 break;
8576 }
8577 }
8578
8579 /* Whether SFrame stack trace info is supported. */
8580
8581 bool
8582 aarch64_support_sframe_p (void)
8583 {
8584 /* At this time, SFrame is supported for aarch64 only. */
8585 return (aarch64_abi == AARCH64_ABI_LP64);
8586 }
8587
/* Specify if RA tracking is needed.  Always true for AArch64.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  return true;
}
8595
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* RA tracking is always on (see aarch64_sframe_ra_tracking_p), so
     there is no fixed RA offset.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8604
/* Get the abi/arch identifier for SFrame.  */
8606
8607 unsigned char
8608 aarch64_sframe_get_abi_arch (void)
8609 {
8610 unsigned char sframe_abi_arch = 0;
8611
8612 if (aarch64_support_sframe_p ())
8613 {
8614 sframe_abi_arch = target_big_endian
8615 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8616 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8617 }
8618
8619 return sframe_abi_arch;
8620 }
8621
8622 #endif /* OBJ_ELF */
8623 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is the stack pointer with a zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8631
8632 /* Convert REGNAME to a DWARF-2 register number. */
8633
8634 int
8635 tc_aarch64_regname_to_dw2regnum (char *regname)
8636 {
8637 const reg_entry *reg = parse_reg (&regname);
8638 if (reg == NULL)
8639 return -1;
8640
8641 switch (reg->type)
8642 {
8643 case REG_TYPE_SP_32:
8644 case REG_TYPE_SP_64:
8645 case REG_TYPE_R_32:
8646 case REG_TYPE_R_64:
8647 return reg->number;
8648
8649 case REG_TYPE_FP_B:
8650 case REG_TYPE_FP_H:
8651 case REG_TYPE_FP_S:
8652 case REG_TYPE_FP_D:
8653 case REG_TYPE_FP_Q:
8654 return reg->number + 64;
8655
8656 default:
8657 break;
8658 }
8659 return -1;
8660 }
8661
8662 /* Implement DWARF2_ADDR_SIZE. */
8663
8664 int
8665 aarch64_dwarf2_addr_size (void)
8666 {
8667 if (ilp32_p)
8668 return 4;
8669 else if (llp64_p)
8670 return 8;
8671 return bfd_arch_bits_per_address (stdoutput) / 8;
8672 }
8673
8674 /* MD interface: Symbol and relocation handling. */
8675
8676 /* Return the address within the segment that a PC-relative fixup is
8677 relative to. For AArch64 PC-relative fixups applied to instructions
8678 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8679
8680 long
8681 md_pcrel_from_section (fixS * fixP, segT seg)
8682 {
8683 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8684
8685 /* If this is pc-relative and we are going to emit a relocation
8686 then we just want to put out any pipeline compensation that the linker
8687 will need. Otherwise we want to use the calculated base. */
8688 if (fixP->fx_pcrel
8689 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8690 || aarch64_force_relocation (fixP)))
8691 base = 0;
8692
8693 /* AArch64 should be consistent for all pc-relative relocations. */
8694 return base + AARCH64_PCREL_OFFSET;
8695 }
8696
8697 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8698 Otherwise we have no need to default values of symbols. */
8699
8700 symbolS *
8701 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8702 {
8703 #ifdef OBJ_ELF
8704 if (name[0] == '_' && name[1] == 'G'
8705 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8706 {
8707 if (!GOT_symbol)
8708 {
8709 if (symbol_find (name))
8710 as_bad (_("GOT already in the symbol table"));
8711
8712 GOT_symbol = symbol_new (name, undefined_section,
8713 &zero_address_frag, 0);
8714 }
8715
8716 return GOT_symbol;
8717 }
8718 #endif
8719
8720 return 0;
8721 }
8722
8723 /* Return non-zero if the indicated VALUE has overflowed the maximum
8724 range expressible by a unsigned number with the indicated number of
8725 BITS. */
8726
8727 static bool
8728 unsigned_overflow (valueT value, unsigned bits)
8729 {
8730 valueT lim;
8731 if (bits >= sizeof (valueT) * 8)
8732 return false;
8733 lim = (valueT) 1 << bits;
8734 return (value >= lim);
8735 }
8736
8737
8738 /* Return non-zero if the indicated VALUE has overflowed the maximum
8739 range expressible by an signed number with the indicated number of
8740 BITS. */
8741
8742 static bool
8743 signed_overflow (offsetT value, unsigned bits)
8744 {
8745 offsetT lim;
8746 if (bits >= sizeof (offsetT) * 8)
8747 return false;
8748 lim = (offsetT) 1 << (bits - 1);
8749 return (value < -lim || value >= lim);
8750 }
8751
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the signed 9-bit form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8814
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.

   FIXP/BUF locate the instruction bytes to patch; INSTR is the decoded
   instruction and VALUE the resolved immediate.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8875
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate, patched directly into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode via the opcode table since the
	 bitmask form cannot be patched into the word directly.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  On failure for a scaled load/store, fall back to
	 the programmer-friendly unscaled (LDUR/STUR) form.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9054
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Extra fixup flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT) stashed in
     fx_addnumber when the fixup was created.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW family share one patching path; SCALE selects which
       16-bit group of the value goes into the instruction.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently only a very limited number of fix-up types actually use
     this field, so the impact on performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9447
9448 /* Translate internal representation of relocation info to BFD target
9449 format. */
9450
9451 arelent *
9452 tc_gen_reloc (asection * section, fixS * fixp)
9453 {
9454 arelent *reloc;
9455 bfd_reloc_code_real_type code;
9456
9457 reloc = XNEW (arelent);
9458
9459 reloc->sym_ptr_ptr = XNEW (asymbol *);
9460 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9461 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9462
9463 if (fixp->fx_pcrel)
9464 {
9465 if (section->use_rela_p)
9466 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9467 else
9468 fixp->fx_offset = reloc->address;
9469 }
9470 reloc->addend = fixp->fx_offset;
9471
9472 code = fixp->fx_r_type;
9473 switch (code)
9474 {
9475 case BFD_RELOC_16:
9476 if (fixp->fx_pcrel)
9477 code = BFD_RELOC_16_PCREL;
9478 break;
9479
9480 case BFD_RELOC_32:
9481 if (fixp->fx_pcrel)
9482 code = BFD_RELOC_32_PCREL;
9483 break;
9484
9485 case BFD_RELOC_64:
9486 if (fixp->fx_pcrel)
9487 code = BFD_RELOC_64_PCREL;
9488 break;
9489
9490 default:
9491 break;
9492 }
9493
9494 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9495 if (reloc->howto == NULL)
9496 {
9497 as_bad_where (fixp->fx_file, fixp->fx_line,
9498 _
9499 ("cannot represent %s relocation in this object file format"),
9500 bfd_get_reloc_code_name (code));
9501 return NULL;
9502 }
9503
9504 return reloc;
9505 }
9506
/* This fix_new is called by cons via TC_CONS_FIX_NEW.
   Create a fixup for a SIZE-byte data value described by EXP at offset
   WHERE in FRAG.  */

void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data relocations emitted here are never pc-relative.  */
  int pcrel = 0;

#ifdef TE_PE
  /* PE-specific operators carry their own relocation types.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
      /* Pick a reloc.
	 FIXME: @@ Should look at CPU word size.  */
      switch (size)
	{
	case 1:
	  type = BFD_RELOC_8;
	  break;
	case 2:
	  type = BFD_RELOC_16;
	  break;
	case 4:
	  type = BFD_RELOC_32;
	  break;
	case 8:
	  type = BFD_RELOC_64;
	  break;
	default:
	  as_bad (_("cannot do %u-byte relocation"), size);
	  type = BFD_RELOC_UNUSED;
	  break;
	}
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9556
9557 /* Implement md_after_parse_args. This is the earliest time we need to decide
9558 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9559
9560 void
9561 aarch64_after_parse_args (void)
9562 {
9563 if (aarch64_abi != AARCH64_ABI_NONE)
9564 return;
9565
9566 #ifdef OBJ_ELF
9567 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9568 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9569 aarch64_abi = AARCH64_ABI_ILP32;
9570 else
9571 aarch64_abi = AARCH64_ABI_LP64;
9572 #else
9573 aarch64_abi = AARCH64_ABI_LLP64;
9574 #endif
9575 }
9576
9577 #ifdef OBJ_ELF
9578 const char *
9579 elf64_aarch64_target_format (void)
9580 {
9581 #ifdef TE_CLOUDABI
9582 /* FIXME: What to do for ilp32_p ? */
9583 if (target_big_endian)
9584 return "elf64-bigaarch64-cloudabi";
9585 else
9586 return "elf64-littleaarch64-cloudabi";
9587 #else
9588 if (target_big_endian)
9589 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9590 else
9591 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9592 #endif
9593 }
9594
/* Hook run over each symbol before the symbol table is written out;
   simply delegates to the generic ELF symbol handling.  *PUNTP is the
   generic "discard this symbol" flag.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9600 #elif defined OBJ_COFF
/* Return the BFD target name for COFF output; only little-endian PE
   is supported.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9606 #endif
9607
9608 /* MD interface: Finalization. */
9609
9610 /* A good place to do this, although this was probably not intended
9611 for this kind of use. We need to dump the literal pool before
9612 references are made to a null symbol pointer. */
9613
9614 void
9615 aarch64_cleanup (void)
9616 {
9617 literal_pool *pool;
9618
9619 for (pool = list_of_pools; pool; pool = pool->next)
9620 {
9621 /* Put it at the end of the relevant section. */
9622 subseg_set (pool->section, pool->sub_section);
9623 s_ltorg (0);
9624 }
9625 }
9626
9627 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections; ABFD and DUMMY are
   unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas never wrote to.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 scan forward to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9691 #endif
9692
/* Adjust the symbol table.  Called after all input has been read, just
   before the symbol table is written out.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9705
/* Insert VALUE under KEY into TABLE.  The final 0 argument asks
   str_hash_insert not to replace an existing entry, so the first value
   registered for a key wins (later duplicates are silently ignored).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9711
/* Like checked_hash_insert, but for system-register tables: assert that
   KEY fits within AARCH64_MAX_SYSREG_NAME_LEN, the bound the sysreg
   parsing code relies on.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9718
9719 static void
9720 fill_instruction_hash_table (void)
9721 {
9722 const aarch64_opcode *opcode = aarch64_opcode_table;
9723
9724 while (opcode->name != NULL)
9725 {
9726 templates *templ, *new_templ;
9727 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9728
9729 new_templ = XNEW (templates);
9730 new_templ->opcode = opcode;
9731 new_templ->next = NULL;
9732
9733 if (!templ)
9734 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9735 else
9736 {
9737 new_templ->next = templ->next;
9738 templ->next = new_templ;
9739 }
9740 ++opcode;
9741 }
9742 }
9743
/* Copy at most NUM characters from SRC to DST, converting each to upper
   case, and NUL-terminate DST.  DST must have room for NUM + 1 bytes.
   Stops early at the end of SRC.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  /* Use size_t for the index to match NUM's type and avoid a
     signed/width-mismatched comparison on large counts.  */
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9752
9753 /* Assume STR point to a lower-case string, allocate, convert and return
9754 the corresponding upper-case string. */
9755 static inline const char*
9756 get_upper_str (const char *str)
9757 {
9758 char *ret;
9759 size_t len = strlen (str);
9760 ret = XNEWVEC (char, len + 1);
9761 convert_to_upper (ret, str, len);
9762 return ret;
9763 }
9764
9765 /* MD interface: Initialization. */
9766
/* MD interface hook: called once before any input is read.  Builds all
   the lookup hash tables used by the operand/opcode parsers, resolves
   the CPU feature set from the command line, and records the target
   machine in the output BFD.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the per-category hash tables the parsers consult.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields, keyed by name; the tables are
     terminated by a NULL name entry.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* IC / DC / AT / TLBI operations and speculation restriction ops.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV condition-flag operands.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers; accepted in lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  /* DSB nXS barrier variants share the barrier option table.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu takes precedence over -march; fall back to the configured
     default when neither was given.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29;	/* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9936
/* Command line processing.  */

/* Target-specific short options; "m:" means -m takes an argument
   (handled via the tables below in md_parse_option).  */
const char *md_shortopts = "m:";

/* Define -EB/-EL options only for the endiannesses this configuration
   can produce.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Target-specific long options, dispatched in md_parse_option.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9963
/* A simple -m<option> flag: matching it stores VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Boolean-style -m options; scanned by md_parse_option and listed by
   md_show_usage.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9986
/* An entry in the -mcpu=<name> table: the base architecture plus any
   CPU-specific optional features.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL-name entry.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_SVE
				 | AARCH64_FEATURE_SVE2
				 | AARCH64_FEATURE_SVE2_BITPERM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			    AARCH64_FEATURE_PROFILE
			  | AARCH64_FEATURE_CVADP
			  | AARCH64_FEATURE_SVE
			  | AARCH64_FEATURE_SSBS
			  | AARCH64_FEATURE_RNG
			  | AARCH64_FEATURE_F16
			  | AARCH64_FEATURE_BFLOAT16
			  | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
10155
/* An entry in the -march=<name> table: architecture name and its base
   feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL-name entry.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10182
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Architectural extension names accepted after '+' in -march/-mcpu and
   in .arch_extension: VALUE is the feature bit(s) the name enables,
   REQUIRE the features it depends on (closed over transitively by
   aarch64_feature_enable_set/aarch64_feature_disable_set).
   Terminated by a NULL-name entry.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  /* "sme-f64" and "sme-i64" are kept as aliases of the canonical
     "sme-f64f64" / "sme-i16i64" spellings.  */
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme2",		AARCH64_FEATURE (AARCH64_FEATURE_SME2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10293
/* A -m option taking a sub-argument (e.g. -mcpu=<name>): OPTION is the
   prefix to match and FUNC parses what follows it.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10301
10302 /* Transitive closure of features depending on set. */
10303 static aarch64_feature_set
10304 aarch64_feature_disable_set (aarch64_feature_set set)
10305 {
10306 const struct aarch64_option_cpu_value_table *opt;
10307 aarch64_feature_set prev = 0;
10308
10309 while (prev != set) {
10310 prev = set;
10311 for (opt = aarch64_features; opt->name != NULL; opt++)
10312 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10313 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10314 }
10315 return set;
10316 }
10317
10318 /* Transitive closure of dependencies of set. */
10319 static aarch64_feature_set
10320 aarch64_feature_enable_set (aarch64_feature_set set)
10321 {
10322 const struct aarch64_option_cpu_value_table *opt;
10323 aarch64_feature_set prev = 0;
10324
10325 while (prev != set) {
10326 prev = set;
10327 for (opt = aarch64_features; opt->name != NULL; opt++)
10328 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10329 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10330 }
10331 return set;
10332 }
10333
10334 static int
10335 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10336 bool ext_only)
10337 {
10338 /* We insist on extensions being added before being removed. We achieve
10339 this by using the ADDING_VALUE variable to indicate whether we are
10340 adding an extension (1) or removing it (0) and only allowing it to
10341 change in the order -1 -> 1 -> 0. */
10342 int adding_value = -1;
10343 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10344
10345 /* Copy the feature set, so that we can modify it. */
10346 *ext_set = **opt_p;
10347 *opt_p = ext_set;
10348
10349 while (str != NULL && *str != 0)
10350 {
10351 const struct aarch64_option_cpu_value_table *opt;
10352 const char *ext = NULL;
10353 int optlen;
10354
10355 if (!ext_only)
10356 {
10357 if (*str != '+')
10358 {
10359 as_bad (_("invalid architectural extension"));
10360 return 0;
10361 }
10362
10363 ext = strchr (++str, '+');
10364 }
10365
10366 if (ext != NULL)
10367 optlen = ext - str;
10368 else
10369 optlen = strlen (str);
10370
10371 if (optlen >= 2 && startswith (str, "no"))
10372 {
10373 if (adding_value != 0)
10374 adding_value = 0;
10375 optlen -= 2;
10376 str += 2;
10377 }
10378 else if (optlen > 0)
10379 {
10380 if (adding_value == -1)
10381 adding_value = 1;
10382 else if (adding_value != 1)
10383 {
10384 as_bad (_("must specify extensions to add before specifying "
10385 "those to remove"));
10386 return false;
10387 }
10388 }
10389
10390 if (optlen == 0)
10391 {
10392 as_bad (_("missing architectural extension"));
10393 return 0;
10394 }
10395
10396 gas_assert (adding_value != -1);
10397
10398 for (opt = aarch64_features; opt->name != NULL; opt++)
10399 if (strncmp (opt->name, str, optlen) == 0)
10400 {
10401 aarch64_feature_set set;
10402
10403 /* Add or remove the extension. */
10404 if (adding_value)
10405 {
10406 set = aarch64_feature_enable_set (opt->value);
10407 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10408 }
10409 else
10410 {
10411 set = aarch64_feature_disable_set (opt->value);
10412 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10413 }
10414 break;
10415 }
10416
10417 if (opt->name == NULL)
10418 {
10419 as_bad (_("unknown architectural extension `%s'"), str);
10420 return 0;
10421 }
10422
10423 str = ext;
10424 };
10425
10426 return 1;
10427 }
10428
10429 static int
10430 aarch64_parse_cpu (const char *str)
10431 {
10432 const struct aarch64_cpu_option_table *opt;
10433 const char *ext = strchr (str, '+');
10434 size_t optlen;
10435
10436 if (ext != NULL)
10437 optlen = ext - str;
10438 else
10439 optlen = strlen (str);
10440
10441 if (optlen == 0)
10442 {
10443 as_bad (_("missing cpu name `%s'"), str);
10444 return 0;
10445 }
10446
10447 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10448 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10449 {
10450 mcpu_cpu_opt = &opt->value;
10451 if (ext != NULL)
10452 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10453
10454 return 1;
10455 }
10456
10457 as_bad (_("unknown cpu `%s'"), str);
10458 return 0;
10459 }
10460
10461 static int
10462 aarch64_parse_arch (const char *str)
10463 {
10464 const struct aarch64_arch_option_table *opt;
10465 const char *ext = strchr (str, '+');
10466 size_t optlen;
10467
10468 if (ext != NULL)
10469 optlen = ext - str;
10470 else
10471 optlen = strlen (str);
10472
10473 if (optlen == 0)
10474 {
10475 as_bad (_("missing architecture name `%s'"), str);
10476 return 0;
10477 }
10478
10479 for (opt = aarch64_archs; opt->name != NULL; opt++)
10480 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10481 {
10482 march_cpu_opt = &opt->value;
10483 if (ext != NULL)
10484 return aarch64_parse_features (ext, &march_cpu_opt, false);
10485
10486 return 1;
10487 }
10488
10489 as_bad (_("unknown architecture `%s'\n"), str);
10490 return 0;
10491 }
10492
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* ABI names accepted by -mabi=; the set depends on the object format
   (ELF supports ILP32/LP64, PE/COFF uses LLP64).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10508
10509 static int
10510 aarch64_parse_abi (const char *str)
10511 {
10512 unsigned int i;
10513
10514 if (str[0] == '\0')
10515 {
10516 as_bad (_("missing abi name `%s'"), str);
10517 return 0;
10518 }
10519
10520 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10521 if (strcmp (str, aarch64_abis[i].name) == 0)
10522 {
10523 aarch64_abi = aarch64_abis[i].value;
10524 return 1;
10525 }
10526
10527 as_bad (_("unknown abi `%s'\n"), str);
10528 return 0;
10529 }
10530
/* The -m options that carry a sub-argument; the matching parser is
   invoked with the text after the '=' by md_parse_option.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10540
/* MD interface hook: handle target-specific command-line option C with
   argument ARG.  Return 1 if the option was recognized (and consumed),
   0 otherwise so the generic option code can report it.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple boolean -m options.  The option letter C
	 must match the table entry's first character and ARG the
	 rest.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the options taking a sub-argument (-mabi=, -mcpu=,
	 -march=); only the prefix is matched here.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10607
10608 void
10609 md_show_usage (FILE * fp)
10610 {
10611 struct aarch64_option_table *opt;
10612 struct aarch64_long_option_table *lopt;
10613
10614 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10615
10616 for (opt = aarch64_opts; opt->option != NULL; opt++)
10617 if (opt->help != NULL)
10618 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10619
10620 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10621 if (lopt->help != NULL)
10622 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10623
10624 #ifdef OPTION_EB
10625 fprintf (fp, _("\
10626 -EB assemble code for a big-endian cpu\n"));
10627 #endif
10628
10629 #ifdef OPTION_EL
10630 fprintf (fp, _("\
10631 -EL assemble code for a little-endian cpu\n"));
10632 #endif
10633 }
10634
10635 /* Parse a .cpu directive. */
10636
10637 static void
10638 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10639 {
10640 const struct aarch64_cpu_option_table *opt;
10641 char saved_char;
10642 char *name;
10643 char *ext;
10644 size_t optlen;
10645
10646 name = input_line_pointer;
10647 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10648 saved_char = *input_line_pointer;
10649 *input_line_pointer = 0;
10650
10651 ext = strchr (name, '+');
10652
10653 if (ext != NULL)
10654 optlen = ext - name;
10655 else
10656 optlen = strlen (name);
10657
10658 /* Skip the first "all" entry. */
10659 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10660 if (strlen (opt->name) == optlen
10661 && strncmp (name, opt->name, optlen) == 0)
10662 {
10663 mcpu_cpu_opt = &opt->value;
10664 if (ext != NULL)
10665 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10666 return;
10667
10668 cpu_variant = *mcpu_cpu_opt;
10669
10670 *input_line_pointer = saved_char;
10671 demand_empty_rest_of_line ();
10672 return;
10673 }
10674 as_bad (_("unknown cpu `%s'"), name);
10675 *input_line_pointer = saved_char;
10676 ignore_rest_of_line ();
10677 }
10678
10679
10680 /* Parse a .arch directive. */
10681
10682 static void
10683 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10684 {
10685 const struct aarch64_arch_option_table *opt;
10686 char saved_char;
10687 char *name;
10688 char *ext;
10689 size_t optlen;
10690
10691 name = input_line_pointer;
10692 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10693 saved_char = *input_line_pointer;
10694 *input_line_pointer = 0;
10695
10696 ext = strchr (name, '+');
10697
10698 if (ext != NULL)
10699 optlen = ext - name;
10700 else
10701 optlen = strlen (name);
10702
10703 /* Skip the first "all" entry. */
10704 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10705 if (strlen (opt->name) == optlen
10706 && strncmp (name, opt->name, optlen) == 0)
10707 {
10708 mcpu_cpu_opt = &opt->value;
10709 if (ext != NULL)
10710 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10711 return;
10712
10713 cpu_variant = *mcpu_cpu_opt;
10714
10715 *input_line_pointer = saved_char;
10716 demand_empty_rest_of_line ();
10717 return;
10718 }
10719
10720 as_bad (_("unknown architecture `%s'\n"), name);
10721 *input_line_pointer = saved_char;
10722 ignore_rest_of_line ();
10723 }
10724
10725 /* Parse a .arch_extension directive. */
10726
10727 static void
10728 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10729 {
10730 char saved_char;
10731 char *ext = input_line_pointer;
10732
10733 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10734 saved_char = *input_line_pointer;
10735 *input_line_pointer = 0;
10736
10737 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10738 return;
10739
10740 cpu_variant = *mcpu_cpu_opt;
10741
10742 *input_line_pointer = saved_char;
10743 demand_empty_rest_of_line ();
10744 }
10745
/* Copy symbol information.  Propagates the AArch64-specific per-symbol
   flag word from SRC to DEST (used when one symbol is aliased to
   another, e.g. via .set).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10753
10754 #ifdef OBJ_ELF
10755 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10756 This is needed so AArch64 specific st_other values can be independently
10757 specified for an IFUNC resolver (that is called by the dynamic linker)
10758 and the symbol it resolves (aliased to the resolver). In particular,
10759 if a function symbol has special st_other value set via directives,
10760 then attaching an IFUNC resolver to that symbol should not override
10761 the st_other setting. Requiring the directive on the IFUNC resolver
10762 symbol would be unexpected and problematic in C code, where the two
10763 symbols appear as two independent function declarations. */
10764
10765 void
10766 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10767 {
10768 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10769 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10770 /* If size is unset, copy size from src. Because we don't track whether
10771 .size has been used, we can't differentiate .size dest, 0 from the case
10772 where dest's size is unset. */
10773 if (!destelf->size && S_GET_SIZE (dest) == 0)
10774 {
10775 if (srcelf->size)
10776 {
10777 destelf->size = XNEW (expressionS);
10778 *destelf->size = *srcelf->size;
10779 }
10780 S_SET_SIZE (dest, S_GET_SIZE (src));
10781 }
10782 }
10783 #endif