5f5ec1b3dbcba62fa20d339126e50311f651a952
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 #define streq(a, b) (strcmp (a, b) == 0)
42
43 #define END_OF_INSN '\0'
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = AARCH64_ARCH_FEATURES (V8A);
55
56 /* Currently active instruction sequence. */
57 static aarch64_instr_sequence *insn_sequence = NULL;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62 #endif
63
64 /* Which ABI to use. */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,		/* No ABI selected yet.  */
  AARCH64_ABI_LP64 = 1,		/* 32-bit int; 64-bit long and pointers.  */
  AARCH64_ABI_ILP32 = 2,	/* 32-bit int, long and pointers.  */
  AARCH64_ABI_LLP64 = 3		/* 32-bit int and long; 64-bit pointers.  */
};
72
73 unsigned int aarch64_sframe_cfa_sp_reg;
74 /* The other CFA base register for SFrame stack trace info. */
75 unsigned int aarch64_sframe_cfa_fp_reg;
76 unsigned int aarch64_sframe_cfa_ra_reg;
77
78 #ifndef DEFAULT_ARCH
79 #define DEFAULT_ARCH "aarch64"
80 #endif
81
82 #ifdef OBJ_ELF
83 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
84 static const char *default_arch = DEFAULT_ARCH;
85 #endif
86
87 /* AArch64 ABI for the output file. */
88 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
89
90 /* When non-zero, program to a 32-bit model, in which the C data types
91 int, long and all pointer types are 32-bit objects (ILP32); or to a
92 64-bit model, in which the C int type is 32-bits but the C long type
93 and all pointer types are 64-bit objects (LP64). */
94 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
95
96 /* When non zero, C types int and long are 32 bit,
97 pointers, however are 64 bit */
98 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
99
/* Element type of a vector arrangement or element suffix (e.g. the "s"
   in "v0.4s"), plus the SVE predication qualifiers (mapped to
   AARCH64_OPND_QLF_P_Z / _P_M by vectype_to_qualifier below).  */
enum vector_el_type
{
  NT_invtype = -1,	/* No valid type parsed.  */
  NT_b,			/* Byte elements.  */
  NT_h,			/* Halfword elements.  */
  NT_s,			/* Word (single) elements.  */
  NT_d,			/* Doubleword elements.  */
  NT_q,			/* Quadword elements.  */
  NT_zero,		/* Zeroing predication ("/z").  */
  NT_merge		/* Merging predication ("/m").  */
};
111
112 /* Bits for DEFINED field in vector_type_el. */
113 #define NTA_HASTYPE 1
114 #define NTA_HASINDEX 2
115 #define NTA_HASVARWIDTH 4
116
/* Parsed representation of a vector type suffix such as ".4s" or an
   indexed element such as ".b[2]".  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*); valid if
				   NTA_HASTYPE is set in DEFINED.  */
  unsigned char defined;	/* Mask of NTA_HAS* bits saying which
				   fields below are meaningful.  */
  unsigned element_size;	/* Element size for NTA_HASVARWIDTH
				   suffixes — units not visible here,
				   TODO confirm against the parser.  */
  unsigned width;		/* Number of elements, e.g. 4 in ".4s".  */
  int64_t index;		/* Element index when NTA_HASINDEX is set.  */
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
/* GAS-internal relocation/fixup state for the instruction being
   assembled (see aarch64_instruction below).  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression to be resolved.  */
  int pc_rel;				/* Nonzero for PC-relative fixups.  */
  enum aarch64_opnd opnd;		/* Operand the fixup applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* Presumably: libopcodes must be
					   consulted to finish encoding the
					   fixup — confirm at use sites.  */
};
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 aarch64_operand_error parsing_error;
144 /* The condition that appears in the assembly line. */
145 int cond;
146 /* Relocation information (including the GAS internal fixup). */
147 struct reloc reloc;
148 /* Need to generate an immediate in the literal pool. */
149 unsigned gen_lit_pool : 1;
150 };
151
152 typedef struct aarch64_instruction aarch64_instruction;
153
154 static aarch64_instruction inst;
155
156 static bool parse_operands (char *, const aarch64_opcode *);
157 static bool programmer_friendly_fixup (aarch64_instruction *);
158
159 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
160 data fields contain the following information:
161
162 data[0].i:
163 A mask of register types that would have been acceptable as bare
164 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
165 is set if a general parsing error occurred for an operand (that is,
166 an error not related to registers, and having no error string).
167
168 data[1].i:
169 A mask of register types that would have been acceptable inside
170 a register list. In addition, SEF_IN_REGLIST is set if the
171 operand contained a '{' and if we got to the point of trying
172 to parse a register inside a list.
173
174 data[2].i:
175 The mask associated with the register that was actually seen, or 0
176 if none. A nonzero value describes a register inside a register
177 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
178 register.
179
180 The idea is that stringless errors from multiple opcode templates can
181 be ORed together to give a summary of the available alternatives. */
182 #define SEF_DEFAULT_ERROR (1U << 31)
183 #define SEF_IN_REGLIST (1U << 31)
184
185 /* Diagnostics inline function utilities.
186
187 These are lightweight utilities which should only be called by parse_operands
188 and other parsers. GAS processes each assembly line by parsing it against
189 instruction template(s), in the case of multiple templates (for the same
190 mnemonic name), those templates are tried one by one until one succeeds or
191 all fail. An assembly line may fail a few templates before being
192 successfully parsed; an error saved here in most cases is not a user error
193 but an error indicating the current template is not the right template.
194 Therefore it is very important that errors can be saved at a low cost during
195 the parsing; we don't want to slow down the whole parsing by recording
196 non-user errors in detail.
197
198 Remember that the objective is to help GAS pick up the most appropriate
199 error message in the case of multiple templates, e.g. FMOV which has 8
200 templates. */
201
202 static inline void
203 clear_error (void)
204 {
205 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
206 inst.parsing_error.kind = AARCH64_OPDE_NIL;
207 }
208
209 static inline bool
210 error_p (void)
211 {
212 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
213 }
214
215 static inline void
216 set_error (enum aarch64_operand_error_kind kind, const char *error)
217 {
218 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
219 inst.parsing_error.index = -1;
220 inst.parsing_error.kind = kind;
221 inst.parsing_error.error = error;
222 }
223
224 static inline void
225 set_recoverable_error (const char *error)
226 {
227 set_error (AARCH64_OPDE_RECOVERABLE, error);
228 }
229
230 /* Use the DESC field of the corresponding aarch64_operand entry to compose
231 the error message. */
232 static inline void
233 set_default_error (void)
234 {
235 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
236 inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
237 }
238
239 static inline void
240 set_syntax_error (const char *error)
241 {
242 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
243 }
244
245 static inline void
246 set_first_syntax_error (const char *error)
247 {
248 if (! error_p ())
249 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
250 }
251
252 static inline void
253 set_fatal_syntax_error (const char *error)
254 {
255 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
256 }
257 \f
258 /* Return value for certain parsers when the parsing fails; those parsers
259 return the information of the parsed result, e.g. register number, on
260 success. */
261 #define PARSE_FAIL -1
262
263 /* This is an invalid condition code that means no conditional field is
264 present. */
265 #define COND_ALWAYS 0x10
266
267 typedef struct
268 {
269 const char *template;
270 uint32_t value;
271 } asm_nzcv;
272
273 struct reloc_entry
274 {
275 char *name;
276 bfd_reloc_code_real_type reloc;
277 };
278
279 /* Macros to define the register types and masks for the purpose
280 of parsing. */
281
282 #undef AARCH64_REG_TYPES
283 #define AARCH64_REG_TYPES \
284 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
285 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
286 BASIC_REG_TYPE(SP_32) /* wsp */ \
287 BASIC_REG_TYPE(SP_64) /* sp */ \
288 BASIC_REG_TYPE(ZR_32) /* wzr */ \
289 BASIC_REG_TYPE(ZR_64) /* xzr */ \
290 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
291 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
292 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
293 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
294 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
295 BASIC_REG_TYPE(V) /* v[0-31] */ \
296 BASIC_REG_TYPE(Z) /* z[0-31] */ \
297 BASIC_REG_TYPE(P) /* p[0-15] */ \
298 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
299 BASIC_REG_TYPE(ZA) /* za */ \
300 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
301 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
302 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
303 BASIC_REG_TYPE(ZT0) /* zt0 */ \
304 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
305 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
306 /* Typecheck: same, plus SVE registers. */ \
307 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z)) \
309 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
310 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
312 /* Typecheck: same, plus SVE registers. */ \
313 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
315 | REG_TYPE(Z)) \
316 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
317 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
319 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
320 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
323 /* Typecheck: any [BHSDQ]P FP. */ \
324 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
325 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
326 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
327 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
329 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
330 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
331 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
332 be used for SVE instructions, since Zn and Pn are valid symbols \
333 in other contexts. */ \
334 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
335 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
336 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
337 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
338 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
339 | REG_TYPE(Z) | REG_TYPE(P)) \
340 /* Likewise, but with predicate-as-counter registers added. */ \
341 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP_PN, REG_TYPE(R_32) | REG_TYPE(R_64) \
342 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
343 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
344 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
345 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
346 | REG_TYPE(Z) | REG_TYPE(P) | REG_TYPE(PN)) \
347 /* Any integer register; used for error messages only. */ \
348 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
349 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
350 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
351 /* Any vector register. */ \
352 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
353 /* An SVE vector or predicate register. */ \
354 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
355 /* Any vector or predicate register. */ \
356 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
357 /* The whole of ZA or a single tile. */ \
358 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
359 /* A horizontal or vertical slice of a ZA tile. */ \
360 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
361 /* Pseudo type to mark the end of the enumerator sequence. */ \
362 END_REG_TYPE(MAX)
363
364 #undef BASIC_REG_TYPE
365 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
366 #undef MULTI_REG_TYPE
367 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
368 #undef END_REG_TYPE
369 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
370
371 /* Register type enumerators. */
372 typedef enum aarch64_reg_type_
373 {
374 /* A list of REG_TYPE_*. */
375 AARCH64_REG_TYPES
376 } aarch64_reg_type;
377
378 #undef BASIC_REG_TYPE
379 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
380 #undef REG_TYPE
381 #define REG_TYPE(T) (1 << REG_TYPE_##T)
382 #undef MULTI_REG_TYPE
383 #define MULTI_REG_TYPE(T,V) V,
384 #undef END_REG_TYPE
385 #define END_REG_TYPE(T) 0
386
/* Structure for a hash table entry for a register.  Entries are looked up
   by name via aarch64_reg_hsh (see parse_reg).  */
typedef struct
{
  const char *name;		/* Register name as written in assembly.  */
  unsigned char number;		/* Register number, e.g. 0 for "x0".  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* REG_TYPE_* class.  */
  unsigned char builtin;	/* Presumably nonzero for predefined (as
				   opposed to user-defined) registers —
				   confirm at the definition sites.  */
} reg_entry;
395
396 /* Values indexed by aarch64_reg_type to assist the type checking. */
397 static const unsigned reg_type_masks[] =
398 {
399 AARCH64_REG_TYPES
400 };
401
402 #undef BASIC_REG_TYPE
403 #undef REG_TYPE
404 #undef MULTI_REG_TYPE
405 #undef END_REG_TYPE
406 #undef AARCH64_REG_TYPES
407
408 /* We expected one of the registers in MASK to be specified. If a register
409 of some kind was specified, SEEN is a mask that contains that register,
410 otherwise it is zero.
411
412 If it is possible to provide a relatively pithy message that describes
413 the error exactly, return a string that does so, reporting the error
414 against "operand %d". Return null otherwise.
415
416 From a QoI perspective, any REG_TYPE_* that is passed as the first
417 argument to set_expected_reg_error should generally have its own message.
418 Providing messages for combinations of such REG_TYPE_*s can be useful if
419 it is possible to summarize the combination in a relatively natural way.
420 On the other hand, it seems better to avoid long lists of unrelated
421 things. */
422
static const char *
get_reg_expected_msg (unsigned int mask, unsigned int seen)
{
  /* First handle messages that use SEEN.  These give more targeted
     diagnostics for near-miss register kinds (e.g. suffixed vs.
     unsuffixed ZA tiles) than the MASK-only checks below.  */
  if ((mask & reg_type_masks[REG_TYPE_ZAT])
      && (seen & reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an unsuffixed ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZATHV])
      && (seen & reg_type_masks[REG_TYPE_ZAT]))
    return N_("missing horizontal or vertical suffix at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZA])
      && (seen & (reg_type_masks[REG_TYPE_ZAT]
		  | reg_type_masks[REG_TYPE_ZATHV])))
    return N_("expected 'za' rather than a ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_PN])
      && (seen & reg_type_masks[REG_TYPE_P]))
    return N_("expected a predicate-as-counter rather than predicate-as-mask"
	      " register at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_P])
      && (seen & reg_type_masks[REG_TYPE_PN]))
    return N_("expected a predicate-as-mask rather than predicate-as-counter"
	      " register at operand %d");

  /* The remaining checks compare MASK for exact equality, so order
     within each group does not matter.  */

  /* Integer, zero and stack registers.  */
  if (mask == reg_type_masks[REG_TYPE_R_64])
    return N_("expected a 64-bit integer register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_ZR])
    return N_("expected an integer or zero register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_SP])
    return N_("expected an integer or stack pointer register at operand %d");

  /* Floating-point and SIMD registers.  */
  if (mask == reg_type_masks[REG_TYPE_BHSDQ])
    return N_("expected a scalar SIMD or floating-point register"
	      " at operand %d");
  if (mask == reg_type_masks[REG_TYPE_V])
    return N_("expected an Advanced SIMD vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_Z])
    return N_("expected an SVE vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_P]
      || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
    /* Use this error for "predicate-as-mask only" and "either kind of
       predicate".  We report a more specific error if P is used where
       PN is expected, and vice versa, so the issue at this point is
       "predicate-like" vs. "not predicate-like".  */
    return N_("expected an SVE predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_PN])
    return N_("expected an SVE predicate-as-counter register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZ])
    return N_("expected a vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZP])
    return N_("expected an SVE vector or predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZP])
    return N_("expected a vector or predicate register at operand %d");

  /* SME-related registers.  */
  if (mask == reg_type_masks[REG_TYPE_ZA])
    return N_("expected a ZA array vector at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
    return N_("expected ZT0 or a ZA mask at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZAT])
    return N_("expected a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZATHV])
    return N_("expected a ZA tile slice at operand %d");

  /* Integer and vector combos.  */
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
    return N_("expected an integer register or Advanced SIMD vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
    return N_("expected an integer register or SVE vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
    return N_("expected an integer or vector register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
    return N_("expected an integer or predicate register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
    return N_("expected an integer, vector or predicate register"
	      " at operand %d");

  /* SVE and SME combos.  */
  if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an SVE vector register or ZA tile slice"
	      " at operand %d");

  /* No pithy message available for this combination.  */
  return NULL;
}
514
515 /* Record that we expected a register of type TYPE but didn't see one.
516 REG is the register that we actually saw, or null if we didn't see a
517 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
518 contents of a register list, otherwise it is zero. */
519
520 static inline void
521 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
522 unsigned int flags)
523 {
524 assert (flags == 0 || flags == SEF_IN_REGLIST);
525 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
526 if (flags & SEF_IN_REGLIST)
527 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
528 else
529 inst.parsing_error.data[0].i = reg_type_masks[type];
530 if (reg)
531 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
532 }
533
534 /* Record that we expected a register list containing registers of type TYPE,
535 but didn't see the opening '{'. If we saw a register instead, REG is the
536 register that we saw, otherwise it is null. */
537
538 static inline void
539 set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
540 {
541 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
542 inst.parsing_error.data[1].i = reg_type_masks[type];
543 if (reg)
544 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
545 }
546
547 /* Some well known registers that we refer to directly elsewhere. */
548 #define REG_SP 31
549 #define REG_ZR 31
550
551 /* Instructions take 4 bytes in the object file. */
552 #define INSN_SIZE 4
553
554 static htab_t aarch64_ops_hsh;
555 static htab_t aarch64_cond_hsh;
556 static htab_t aarch64_shift_hsh;
557 static htab_t aarch64_sys_regs_hsh;
558 static htab_t aarch64_pstatefield_hsh;
559 static htab_t aarch64_sys_regs_ic_hsh;
560 static htab_t aarch64_sys_regs_dc_hsh;
561 static htab_t aarch64_sys_regs_at_hsh;
562 static htab_t aarch64_sys_regs_tlbi_hsh;
563 static htab_t aarch64_sys_regs_sr_hsh;
564 static htab_t aarch64_reg_hsh;
565 static htab_t aarch64_barrier_opt_hsh;
566 static htab_t aarch64_nzcv_hsh;
567 static htab_t aarch64_pldop_hsh;
568 static htab_t aarch64_hint_opt_hsh;
569
570 /* Stuff needed to resolve the label ambiguity
571 As:
572 ...
573 label: <insn>
574 may differ from:
575 ...
576 label:
577 <insn> */
578
579 static symbolS *last_label_seen;
580
581 /* Literal pool structure. Held on a per-section
582 and per-sub-section basis. */
583
584 #define MAX_LITERAL_POOL_SIZE 1024
585 typedef struct literal_expression
586 {
587 expressionS exp;
588 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
589 LITTLENUM_TYPE * bignum;
590 } literal_expression;
591
typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];	/* Pool entries.  */
  unsigned int next_free_entry;	/* Index of the first unused slot.  */
  unsigned int id;		/* Pool identifier — presumably used to
				   name the pool's label; confirm where
				   pools are emitted.  */
  symbolS *symbol;		/* Symbol marking the pool's location.  */
  segT section;			/* Owning section.  */
  subsegT sub_section;		/* Owning subsection.  */
  int size;			/* Pool size — units (bytes vs. entries)
				   not visible here, TODO confirm.  */
  struct literal_pool *next;	/* Next pool in list_of_pools.  */
} literal_pool;
603
604 /* Pointer to a linked list of literal pools. */
605 static literal_pool *list_of_pools = NULL;
606 \f
607 /* Pure syntax. */
608
609 /* This array holds the chars that always start a comment. If the
610 pre-processor is disabled, these aren't very useful. */
611 const char comment_chars[] = "";
612
613 /* This array holds the chars that only start a comment at the beginning of
614 a line. If the line seems to have the form '# 123 filename'
615 .line and .file directives will appear in the pre-processed output. */
616 /* Note that input_file.c hand checks for '#' at the beginning of the
617 first line of the input file. This is because the compiler outputs
618 #NO_APP at the beginning of its output. */
619 /* Also note that comments like this one will always work. */
620 const char line_comment_chars[] = "#";
621
622 const char line_separator_chars[] = ";";
623
624 /* Chars that can be used to separate mant
625 from exp in floating point numbers. */
626 const char EXP_CHARS[] = "eE";
627
628 /* Chars that mean this number is a floating point constant. */
629 /* As in 0f12.456 */
630 /* or 0d1.2345e12 */
631
632 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
633
634 /* Prefix character that indicates the start of an immediate value. */
635 #define is_immediate_prefix(C) ((C) == '#')
636
637 /* Separator character handling. */
638
639 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
640
/* If *STR points at character C, step past it and return TRUE; otherwise
   leave *STR alone and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;

  ++*str;
  return true;
}
652
653 #define skip_past_comma(str) skip_past_char (str, ',')
654
655 /* Arithmetic expressions (possibly involving symbols). */
656
657 static bool in_aarch64_get_expression = false;
658
659 /* Third argument to aarch64_get_expression. */
660 #define GE_NO_PREFIX false
661 #define GE_OPT_PREFIX true
662
663 /* Fourth argument to aarch64_get_expression. */
664 #define ALLOW_ABSENT false
665 #define REJECT_ABSENT true
666
667 /* Return TRUE if the string pointed by *STR is successfully parsed
668 as a valid expression; *EP will be filled with the information of
669 such an expression. Otherwise return FALSE.
670
671 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
672 If REJECT_ABSENT is true then treat missing expressions as an error. */
673
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  /* Remember the '#': a malformed expression after an explicit
	     immediate prefix is reported as fatal below, rather than as a
	     try-the-next-template failure.  */
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () parses from the global input_line_pointer, so redirect
     it to our string and restore it on every exit path.  The
     in_aarch64_get_expression flag lets md_operand () know that illegal
     operands should be flagged via O_illegal (see md_operand below).  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  /* a.out only: reject expressions that resolve into unexpected
     sections.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
733
734 /* Turn a string in input_line_pointer into a floating point constant
735 of type TYPE, and store the appropriate bytes in *LITP. The number
736 of LITTLENUMS emitted is stored in *SIZEP. An error message is
737 returned, or NULL on OK. */
738
739 const char *
740 md_atof (int type, char *litP, int *sizeP)
741 {
742 return ieee_md_atof (type, litP, sizeP, target_big_endian);
743 }
744
745 /* We handle all bad expressions here, so that we can report the faulty
746 instruction in the error message. */
747 void
748 md_operand (expressionS * exp)
749 {
750 if (in_aarch64_get_expression)
751 exp->X_op = O_illegal;
752 }
753
754 /* Immediate values. */
755
756 /* Errors may be set multiple times during parsing or bit encoding
757 (particularly in the Neon bits), but usually the earliest error which is set
758 will be the most meaningful. Avoid overwriting it with later (cascading)
759 errors by calling this function. */
760
/* Record ERROR as a syntax error unless an earlier (and usually more
   meaningful) diagnostic has already been recorded.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;

  set_syntax_error (error);
}
767
768 /* Similar to first_error, but this function accepts formatted error
769 message. */
770 static void
771 first_error_fmt (const char *format, ...)
772 {
773 va_list args;
774 enum
775 { size = 100 };
776 /* N.B. this single buffer will not cause error messages for different
777 instructions to pollute each other; this is because at the end of
778 processing of each assembly line, error message if any will be
779 collected by as_bad. */
780 static char buffer[size];
781
782 if (! error_p ())
783 {
784 int ret ATTRIBUTE_UNUSED;
785 va_start (args, format);
786 ret = vsnprintf (buffer, size, format, args);
787 know (ret <= size - 1 && ret >= 0);
788 va_end (args);
789 set_syntax_error (buffer);
790 }
791 }
792
793 /* Internal helper routine converting a vector_type_el structure *VECTYPE
794 to a corresponding operand qualifier. */
795
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predication qualifiers ("/z" and "/m") map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  This relies on AARCH64_OPND_QLF_S_B
	 onwards mirroring the NT_b..NT_q ordering.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Only 32-, 64- and 128-bit arrangements are
	 representable.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
867
868 /* Register parsing. */
869
870 /* Generic register parser which is called by other specialized
871 register parsers.
872 CCP points to what should be the beginning of a register name.
873 If it is indeed a valid register name, advance CCP over it and
874 return the reg_entry structure; otherwise return NULL.
875 It does not issue diagnostics. */
876
877 static reg_entry *
878 parse_reg (char **ccp)
879 {
880 char *start = *ccp;
881 char *p;
882 reg_entry *reg;
883
884 #ifdef REGISTER_PREFIX
885 if (*start != REGISTER_PREFIX)
886 return NULL;
887 start++;
888 #endif
889
890 p = start;
891 if (!ISALPHA (*p) || !is_name_beginner (*p))
892 return NULL;
893
894 do
895 p++;
896 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
897
898 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
899
900 if (!reg)
901 return NULL;
902
903 *ccp = p;
904 return reg;
905 }
906
907 /* Return the operand qualifier associated with all uses of REG, or
908 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
909 that qualifiers don't apply to REG or that qualifiers are added
910 using suffixes. */
911
912 static aarch64_opnd_qualifier_t
913 inherent_reg_qualifier (const reg_entry *reg)
914 {
915 switch (reg->type)
916 {
917 case REG_TYPE_R_32:
918 case REG_TYPE_SP_32:
919 case REG_TYPE_ZR_32:
920 return AARCH64_OPND_QLF_W;
921
922 case REG_TYPE_R_64:
923 case REG_TYPE_SP_64:
924 case REG_TYPE_ZR_64:
925 return AARCH64_OPND_QLF_X;
926
927 case REG_TYPE_FP_B:
928 case REG_TYPE_FP_H:
929 case REG_TYPE_FP_S:
930 case REG_TYPE_FP_D:
931 case REG_TYPE_FP_Q:
932 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
933
934 default:
935 return AARCH64_OPND_QLF_NIL;
936 }
937 }
938
939 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
940 return FALSE. */
941 static bool
942 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
943 {
944 return (reg_type_masks[type] & (1 << reg->type)) != 0;
945 }
946
947 /* Try to parse a base or offset register. Allow SVE base and offset
948 registers if REG_TYPE includes SVE registers. Return the register
949 entry on success, setting *QUALIFIER to the register qualifier.
950 Return null otherwise.
951
952 Note that this function does not issue any diagnostics. */
953
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_Z:
      /* SVE Z registers are accepted only when REG_TYPE allows them,
	 and they must carry an explicit ".s" or ".d" suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      /* Otherwise only integer registers (W/X, including ZR and SP)
	 may serve as base or offset; their qualifier is implied by
	 the register name itself.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  /* Commit the parse position only on success.  */
  *ccp = str;

  return reg;
}
995
996 /* Try to parse a base or offset register. Return the register entry
997 on success, setting *QUALIFIER to the register qualifier. Return null
998 otherwise.
999
1000 Note that this function does not issue any diagnostics. */
1001
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Delegate to the general address-register parser, restricted to
     integer registers (W/X forms, including ZR and SP).  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
}
1007
1008 /* Parse the qualifier of a vector register or vector element of type
1009 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
1010 succeeds; otherwise return FALSE.
1011
1012 Accept only one occurrence of:
1013 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
1014 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only Advanced SIMD (V) registers take a leading element count;
     other register types use a bare element-size letter.  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is rejected for V registers unless written with an
	 explicit count of 1 ("1q"); other register types accept a
	 bare 'q'.  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A counted vector must total 64 or 128 bits, or be one of the
     special 32-bit short forms 2h or 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  /* Consume the element-size letter.  */
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1094
1095 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1096 *PARSED_TYPE and point *STR at the end of the suffix. */
1097
1098 static bool
1099 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1100 {
1101 char *ptr = *str;
1102
1103 /* Skip '/'. */
1104 gas_assert (*ptr == '/');
1105 ptr++;
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'z':
1109 parsed_type->type = NT_zero;
1110 break;
1111 case 'm':
1112 parsed_type->type = NT_merge;
1113 break;
1114 default:
1115 if (*ptr != '\0' && *ptr != ',')
1116 first_error_fmt (_("unexpected character `%c' in predication type"),
1117 *ptr);
1118 else
1119 first_error (_("missing predication type"));
1120 return false;
1121 }
1122 parsed_type->width = 0;
1123 *str = ptr + 1;
1124 return true;
1125 }
1126
1127 /* Return true if CH is a valid suffix character for registers of
1128 type TYPE. */
1129
static bool
aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
{
  switch (type)
    {
    case REG_TYPE_V:
    case REG_TYPE_Z:
    case REG_TYPE_ZA:
    case REG_TYPE_ZAT:
    case REG_TYPE_ZATH:
    case REG_TYPE_ZATV:
      /* Vector, SVE and ZA registers take only a "." shape suffix.  */
      return ch == '.';

    case REG_TYPE_P:
    case REG_TYPE_PN:
      /* Predicate registers additionally accept a "/" predication
	 suffix ("/z" or "/m").  */
      return ch == '.' || ch == '/';

    default:
      return false;
    }
}
1151
1152 /* Parse an index expression at *STR, storing it in *IMM on success. */
1153
1154 static bool
1155 parse_index_expression (char **str, int64_t *imm)
1156 {
1157 expressionS exp;
1158
1159 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1160 if (exp.X_op != O_constant)
1161 {
1162 first_error (_("constant expression required"));
1163 return false;
1164 }
1165 *imm = exp.X_add_number;
1166 return true;
1167 }
1168
1169 /* Parse a register of the type TYPE.
1170
1171 Return null if the string pointed to by *CCP is not a valid register
1172 name or the parsed register is not of TYPE.
1173
1174 Otherwise return the register, and optionally return the register
1175 shape and element index information in *TYPEINFO.
1176
1177 FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.
1178
1179 FLAGS includes PTR_FULL_REG if the function should ignore any potential
1180 register index.
1181
1182 FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
1183 an operand that we can be confident that it is a good match. */
1184
1185 #define PTR_IN_REGLIST (1U << 0)
1186 #define PTR_FULL_REG (1U << 1)
1187 #define PTR_GOOD_MATCH (1U << 2)
1188
static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Record whether the input begins with a letter before parse_reg
     advances STR; used below to distinguish "garbage" from "valid
     name that is not a register".  */
  bool isalpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start with an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      if (!isalpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* Narrow TYPE from the caller's allowed set to the register's
     actual type.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* There are only ELEMENT_SIZE / 8 ZA tiles of a given
	     element size, so the tile number must be below that
	     bound (e.g. only za0.b, but za0.d .. za7.d).  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* A "/z" or "/m" predication suffix.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (reg->type != REG_TYPE_Z
	  && reg->type != REG_TYPE_PN
	  && reg->type != REG_TYPE_ZT0
	  && !is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  /* Commit the parse position.  */
  *ccp = str;

  return reg;
}
1321
1322 /* Parse register.
1323
1324 Return the register on success; return null otherwise.
1325
1326 If this is a NEON vector register with additional type information, fill
1327 in the struct pointed to by VECTYPE (if non-NULL).
1328
1329 This parser does not handle register lists. */
1330
static const reg_entry *
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   struct vector_type_el *vectype)
{
  /* No PTR_* flags: not inside a register list, and indices are
     parsed rather than ignored.  */
  return parse_typed_reg (ccp, type, vectype, 0);
}
1337
1338 static inline bool
1339 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1340 {
1341 return (e1.type == e2.type
1342 && e1.defined == e2.defined
1343 && e1.width == e2.width
1344 && e1.element_size == e2.element_size
1345 && e1.index == e2.index);
1346 }
1347
1348 /* Return the register number mask for registers of type REG_TYPE. */
1349
1350 static inline int
1351 reg_type_mask (aarch64_reg_type reg_type)
1352 {
1353 return reg_type == REG_TYPE_P ? 15 : 31;
1354 }
1355
1356 /* This function parses a list of vector registers of type TYPE.
1357 On success, it returns the parsed register list information in the
1358 following encoded format:
1359
1360 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1361 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1362
1363 The information of the register shape and/or index is returned in
1364 *VECTYPE.
1365
1366 It returns PARSE_FAIL if the register list is invalid.
1367
1368 The list contains one to four registers.
1369 Each register can be one of:
1370 <Vt>.<T>[<index>]
1371 <Vt>.<T>
1372 All <T> should be identical.
1373 All <index> should be identical.
1374 There are restrictions on <Vt> numbers which are checked later
1375 (by reg_list_valid_p). */
1376
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  uint32_t val, val_range, mask;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1u;
  val_range = -1u;
  in_range = 0;
  mask = reg_type_mask (type);
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range must span at least two registers; wrapping past
	     the highest register number (via MASK) is permitted.  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & mask;
	}
      else
	{
	  val_range = val;
	  /* Every register must share the shape/index of the first.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Fold the single register, or each register of the range,
	   into RET_VAL at 5 bits per register.  */
	for (;;)
	  {
	    ret_val |= val_range << ((5 * nb_regs) & 31);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & mask;
	  }
      in_range = 0;
      /* Once one register has parsed, later failures are reported as
	 errors in this construct rather than as a failed match.  */
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* A shared index after the closing brace, e.g. {v0.s-v3.s}[2].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits encode the register count minus one; register numbers
     start at bit 2 (see the function comment above).  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1515
1516 /* Directives: register aliases. */
1517
1518 static reg_entry *
1519 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1520 {
1521 reg_entry *new;
1522 const char *name;
1523
1524 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1525 {
1526 if (new->builtin)
1527 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1528 str);
1529
1530 /* Only warn about a redefinition if it's not defined as the
1531 same register. */
1532 else if (new->number != number || new->type != type)
1533 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1534
1535 return NULL;
1536 }
1537
1538 name = xstrdup (str);
1539 new = XNEW (reg_entry);
1540
1541 new->name = name;
1542 new->number = number;
1543 new->type = type;
1544 new->builtin = false;
1545
1546 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1547
1548 return new;
1549 }
1550
1551 /* Look for the .req directive. This is of the form:
1552
1553 new_register_name .req existing_register_name
1554
1555 If we find one, or if it looks sufficiently like one that we want to
1556 handle any error here, return TRUE. Otherwise return FALSE. */
1557
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: this was a (bad) .req, so the caller should not
	 process the statement any further.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant when it differs from the name
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1630
1631 /* Should never be called, as .req goes between the alias and the
1632 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* A well-formed ".req" follows the alias name and is handled by
     create_register_alias; reaching this handler means .req started
     the statement.  */
  as_bad (_("invalid syntax for .req directive"));
}
1638
1639 /* The .unreq directive deletes an alias which was previously defined
1640 by .req. For example:
1641
1642 my_alias .req r11
1643 .unreq my_alias */
1644
1645 static void
1646 s_unreq (int a ATTRIBUTE_UNUSED)
1647 {
1648 char *name;
1649 char saved_char;
1650
1651 name = input_line_pointer;
1652 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1653 saved_char = *input_line_pointer;
1654 *input_line_pointer = 0;
1655
1656 if (!*name)
1657 as_bad (_("invalid syntax for .unreq directive"));
1658 else
1659 {
1660 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1661
1662 if (!reg)
1663 as_bad (_("unknown register alias '%s'"), name);
1664 else if (reg->builtin)
1665 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1666 name);
1667 else
1668 {
1669 char *p;
1670 char *nbuf;
1671
1672 str_hash_delete (aarch64_reg_hsh, name);
1673 free ((char *) reg->name);
1674 free (reg);
1675
1676 /* Also locate the all upper case and all lower case versions.
1677 Do not complain if we cannot find one or the other as it
1678 was probably deleted above. */
1679
1680 nbuf = strdup (name);
1681 for (p = nbuf; *p; p++)
1682 *p = TOUPPER (*p);
1683 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1684 if (reg)
1685 {
1686 str_hash_delete (aarch64_reg_hsh, nbuf);
1687 free ((char *) reg->name);
1688 free (reg);
1689 }
1690
1691 for (p = nbuf; *p; p++)
1692 *p = TOLOWER (*p);
1693 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1694 if (reg)
1695 {
1696 str_hash_delete (aarch64_reg_hsh, nbuf);
1697 free ((char *) reg->name);
1698 free (reg);
1699 }
1700
1701 free (nbuf);
1702 }
1703 }
1704
1705 *input_line_pointer = saved_char;
1706 demand_empty_rest_of_line ();
1707 }
1708
1709 /* Directives: Instruction set selection. */
1710
1711 #if defined OBJ_ELF || defined OBJ_COFF
1712 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1713 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1714 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1715 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1716
1717 /* Create a new mapping symbol for the transition to STATE. */
1718
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Choose the AAELF64 mapping symbol name for STATE: "$d" for data,
     "$x" for A64 instructions.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Drop an existing mapping symbol at the same offset; the new
	 one supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1772
1773 /* We must sometimes convert a region marked as code to data during
1774 code alignment, if an odd number of bytes have to be padded. The
1775 code mapping symbol is pushed to an aligned address. */
1776
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding as data, then restore STATE at the
     aligned address just past it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1800
1801 static void mapping_state_2 (enum mstate state, int max_chars);
1802
1803 /* Set the mapping state to STATE. Only call this when about to
1804 emit some STATE bytes to the file. */
1805
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Instructions begin after some initial bytes: mark those
	   leading bytes as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Finally emit the mapping symbol for STATE itself.  */
  mapping_state_2 (state, 0);
}
1844
1845 /* Same as mapping_state, but MAX_CHARS bytes have already been
1846 allocated. Put the mapping symbol that far back. */
1847
1848 static void
1849 mapping_state_2 (enum mstate state, int max_chars)
1850 {
1851 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1852
1853 if (!SEG_NORMAL (now_seg))
1854 return;
1855
1856 if (mapstate == state)
1857 /* The mapping symbol has already been emitted.
1858 There is nothing else to do. */
1859 return;
1860
1861 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1862 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1863 }
1864 #else
1865 #define mapping_state(x) /* nothing */
1866 #define mapping_state_2(x, y) /* nothing */
1867 #endif
1868
1869 /* Directives: sectioning and alignment. */
1870
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* Anything placed here is data, so emit a $d mapping symbol.  */
  mapping_state (MAP_DATA);
}
1880
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record the 2-byte alignment even when no frag was emitted.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1892
1893 /* Directives: Literal pools. */
1894
1895 static literal_pool *
1896 find_literal_pool (int size)
1897 {
1898 literal_pool *pool;
1899
1900 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1901 {
1902 if (pool->section == now_seg
1903 && pool->sub_section == now_subseg && pool->size == size)
1904 break;
1905 }
1906
1907 return pool;
1908 }
1909
static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW presumably aborts on allocation failure
	 (xmalloc semantics), which would make this check unreachable;
	 kept as-is -- confirm before removing.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      /* The symbol is a placeholder until s_ltorg places the pool.  */
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1954
1955 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1956 Return TRUE on success, otherwise return FALSE. */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Identical constants can share one pool slot...  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* ...and so can identical symbolic expressions.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place to refer to the pool entry: an offset from
     the pool's (not yet placed) label symbol.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
2014
2015 /* Can't use symbol_new here, so have to create a symbol and then at
2016 a later date assign it a value. That's what these functions do. */
2017
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2065
2066
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Dump the pool for each supported literal size: 4, 8 and 16
     bytes (align = 2..4).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Build the pool's label name; %x is the pool id.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Now that the pool has an address, give its placeholder symbol
	 a real location and enter it into the symbol table.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      /* The saved bignum copy is no longer needed.  */
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2125
2126 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2127 /* Forward declarations for functions below, in the MD interface
2128 section. */
2129 static struct reloc_table_entry * find_reloc_table_entry (char **);
2130
2131 /* Directives: Data. */
2132 /* N.B. the support for relocation suffix in this directive needs to be
2133 implemented properly. */
2134
2135 static void
2136 s_aarch64_cons (int nbytes)
2137 {
2138 expressionS exp;
2139
2140 #ifdef md_flush_pending_output
2141 md_flush_pending_output ();
2142 #endif
2143
2144 if (is_it_end_of_statement ())
2145 {
2146 demand_empty_rest_of_line ();
2147 return;
2148 }
2149
2150 #ifdef md_cons_align
2151 md_cons_align (nbytes);
2152 #endif
2153
2154 mapping_state (MAP_DATA);
2155 do
2156 {
2157 struct reloc_table_entry *reloc;
2158
2159 expression (&exp);
2160
2161 if (exp.X_op != O_symbol)
2162 emit_expr (&exp, (unsigned int) nbytes);
2163 else
2164 {
2165 skip_past_char (&input_line_pointer, '#');
2166 if (skip_past_char (&input_line_pointer, ':'))
2167 {
2168 reloc = find_reloc_table_entry (&input_line_pointer);
2169 if (reloc == NULL)
2170 as_bad (_("unrecognized relocation suffix"));
2171 else
2172 as_bad (_("unimplemented relocation suffix"));
2173 ignore_rest_of_line ();
2174 return;
2175 }
2176 else
2177 emit_expr (&exp, (unsigned int) nbytes);
2178 }
2179 }
2180 while (*input_line_pointer++ == ',');
2181
2182 /* Put terminator back into stream. */
2183 input_line_pointer--;
2184 demand_empty_rest_of_line ();
2185 }
2186 #endif
2187
2188 #ifdef OBJ_ELF
2189 /* Forward declarations for functions below, in the MD interface
2190 section. */
2191 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2192
2193 /* Mark symbol that it follows a variant PCS convention. */
2194
2195 static void
2196 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2197 {
2198 char *name;
2199 char c;
2200 symbolS *sym;
2201 asymbol *bfdsym;
2202 elf_symbol_type *elfsym;
2203
2204 c = get_symbol_name (&name);
2205 if (!*name)
2206 as_bad (_("Missing symbol name in directive"));
2207 sym = symbol_find_or_make (name);
2208 restore_line_pointer (c);
2209 demand_empty_rest_of_line ();
2210 bfdsym = symbol_get_bfdsym (sym);
2211 elfsym = elf_symbol_from (bfdsym);
2212 gas_assert (elfsym);
2213 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2214 }
2215 #endif /* OBJ_ELF */
2216
2217 /* Output a 32-bit word, but mark as an instruction. */
2218
2219 static void
2220 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
2221 {
2222 expressionS exp;
2223 unsigned n = 0;
2224
2225 #ifdef md_flush_pending_output
2226 md_flush_pending_output ();
2227 #endif
2228
2229 if (is_it_end_of_statement ())
2230 {
2231 demand_empty_rest_of_line ();
2232 return;
2233 }
2234
2235 /* Sections are assumed to start aligned. In executable section, there is no
2236 MAP_DATA symbol pending. So we only align the address during
2237 MAP_DATA --> MAP_INSN transition.
2238 For other sections, this is not guaranteed. */
2239 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2240 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2241 frag_align_code (2, 0);
2242
2243 #ifdef OBJ_ELF
2244 mapping_state (MAP_INSN);
2245 #endif
2246
2247 do
2248 {
2249 expression (&exp);
2250 if (exp.X_op != O_constant)
2251 {
2252 as_bad (_("constant expression required"));
2253 ignore_rest_of_line ();
2254 return;
2255 }
2256
2257 if (target_big_endian)
2258 {
2259 unsigned int val = exp.X_add_number;
2260 exp.X_add_number = SWAP_32 (val);
2261 }
2262 emit_expr (&exp, INSN_SIZE);
2263 ++n;
2264 }
2265 while (*input_line_pointer++ == ',');
2266
2267 dwarf2_emit_insn (n * INSN_SIZE);
2268
2269 /* Put terminator back into stream. */
2270 input_line_pointer--;
2271 demand_empty_rest_of_line ();
2272 }
2273
2274 static void
2275 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2276 {
2277 demand_empty_rest_of_line ();
2278 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2279 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2280 }
2281
2282 #ifdef OBJ_ELF
2283 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2284
2285 static void
2286 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2287 {
2288 expressionS exp;
2289
2290 expression (&exp);
2291 frag_grow (4);
2292 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2293 BFD_RELOC_AARCH64_TLSDESC_ADD);
2294
2295 demand_empty_rest_of_line ();
2296 }
2297
2298 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2299
2300 static void
2301 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2302 {
2303 expressionS exp;
2304
2305 /* Since we're just labelling the code, there's no need to define a
2306 mapping symbol. */
2307 expression (&exp);
2308 /* Make sure there is enough room in this frag for the following
2309 blr. This trick only works if the blr follows immediately after
2310 the .tlsdesc directive. */
2311 frag_grow (4);
2312 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2313 BFD_RELOC_AARCH64_TLSDESC_CALL);
2314
2315 demand_empty_rest_of_line ();
2316 }
2317
2318 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2319
2320 static void
2321 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2322 {
2323 expressionS exp;
2324
2325 expression (&exp);
2326 frag_grow (4);
2327 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2328 BFD_RELOC_AARCH64_TLSDESC_LDR);
2329
2330 demand_empty_rest_of_line ();
2331 }
2332 #endif /* OBJ_ELF */
2333
2334 #ifdef TE_PE
2335 static void
2336 s_secrel (int dummy ATTRIBUTE_UNUSED)
2337 {
2338 expressionS exp;
2339
2340 do
2341 {
2342 expression (&exp);
2343 if (exp.X_op == O_symbol)
2344 exp.X_op = O_secrel;
2345
2346 emit_expr (&exp, 4);
2347 }
2348 while (*input_line_pointer++ == ',');
2349
2350 input_line_pointer--;
2351 demand_empty_rest_of_line ();
2352 }
2353
2354 void
2355 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2356 {
2357 expressionS exp;
2358
2359 exp.X_op = O_secrel;
2360 exp.X_add_symbol = symbol;
2361 exp.X_add_number = 0;
2362 emit_expr (&exp, size);
2363 }
2364
2365 static void
2366 s_secidx (int dummy ATTRIBUTE_UNUSED)
2367 {
2368 expressionS exp;
2369
2370 do
2371 {
2372 expression (&exp);
2373 if (exp.X_op == O_symbol)
2374 exp.X_op = O_secidx;
2375
2376 emit_expr (&exp, 2);
2377 }
2378 while (*input_line_pointer++ == ',');
2379
2380 input_line_pointer--;
2381 demand_empty_rest_of_line ();
2382 }
2383 #endif /* TE_PE */
2384
2385 static void s_aarch64_arch (int);
2386 static void s_aarch64_cpu (int);
2387 static void s_aarch64_arch_extension (int);
2388
2389 /* This table describes all the machine specific pseudo-ops the assembler
2390 has to support. The fields are:
2391 pseudo-op name without dot
2392 function to call to execute this pseudo-op
2393 Integer arg to pass to the function. */
2394
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pool dumping; '.pool' is an alias for '.ltorg'.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target/feature selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relaxation markers and variant-PCS marking.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives; the argument is the item size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision FP constants; argument selects IEEE ('h') vs
     bfloat16 ('b') layout in float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2428 \f
2429
2430 /* Check whether STR points to a register name followed by a comma or the
2431 end of line; REG_TYPE indicates which register types are checked
2432 against. Return TRUE if STR is such a register name; otherwise return
2433 FALSE. The function does not intend to produce any diagnostics, but since
2434 the register parser aarch64_reg_parse, which is called by this function,
2435 does produce diagnostics, we call clear_error to clear any diagnostics
2436 that may be generated by aarch64_reg_parse.
2437 Also, the function returns FALSE directly if there is any user error
2438 present at the function entry. This prevents the existing diagnostics
2439 state from being spoiled.
2440 The function currently serves parse_constant_immediate and
2441 parse_big_immediate only. */
2442 static bool
2443 reg_name_p (char *str, aarch64_reg_type reg_type)
2444 {
2445 const reg_entry *reg;
2446
2447 /* Prevent the diagnostics state from being spoiled. */
2448 if (error_p ())
2449 return false;
2450
2451 reg = aarch64_reg_parse (&str, reg_type, NULL);
2452
2453 /* Clear the parsing error that may be set by the reg parser. */
2454 clear_error ();
2455
2456 if (!reg)
2457 return false;
2458
2459 skip_whitespace (str);
2460 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2461 return true;
2462
2463 return false;
2464 }
2465
2466 /* Parser functions used exclusively in instruction operands. */
2467
2468 /* Parse an immediate expression which may not be constant.
2469
2470 To prevent the expression parser from pushing a register name
2471 into the symbol table as an undefined symbol, firstly a check is
2472 done to find out whether STR is a register of type REG_TYPE followed
2473 by a comma or the end of line. Return FALSE if STR is such a string. */
2474
2475 static bool
2476 parse_immediate_expression (char **str, expressionS *exp,
2477 aarch64_reg_type reg_type)
2478 {
2479 if (reg_name_p (*str, reg_type))
2480 {
2481 set_recoverable_error (_("immediate operand required"));
2482 return false;
2483 }
2484
2485 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2486
2487 if (exp->X_op == O_absent)
2488 {
2489 set_fatal_syntax_error (_("missing immediate expression"));
2490 return false;
2491 }
2492
2493 return true;
2494 }
2495
2496 /* Constant immediate-value read function for use in insn parsing.
2497 STR points to the beginning of the immediate (with the optional
2498 leading #); *VAL receives the value. REG_TYPE says which register
2499 names should be treated as registers rather than as symbolic immediates.
2500
2501 Return TRUE on success; otherwise return FALSE. */
2502
2503 static bool
2504 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2505 {
2506 expressionS exp;
2507
2508 if (! parse_immediate_expression (str, &exp, reg_type))
2509 return false;
2510
2511 if (exp.X_op != O_constant)
2512 {
2513 set_syntax_error (_("constant expression required"));
2514 return false;
2515 }
2516
2517 *val = exp.X_add_number;
2518 return true;
2519 }
2520
/* Compress the IEEE single-precision pattern IMM into the 8-bit AArch64
   FP immediate encoding: bits [25:19] of IMM become bits [6:0] and the
   sign bit [31] becomes bit [7].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t frac_exp = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31] -> b[7].  */
  return sign | frac_exp;
}
2527
/* Return TRUE if the single-precision floating-point value encoded in
   IMM can be expressed in the AArch64 8-bit signed floating-point
   format with 3-bit exponent and normalized 4 bits of precision; in
   other words, the value must be expressible as
     (+/-) n / 16 * power (2, r)
   with integers 16 <= n <= 31 and -3 <= r <= 4.

   Such a value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are 0 or 1 independently, with E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  uint32_t expected;

  /* The low 19 fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must be the inverse of bit 30 ('Eeeeee').  */
  expected = (imm & 0x40000000) ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == expected;
}
2560
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision; store the resulting
   single-precision pattern in *FPWORD if so.

   A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

   where n, e, s and S are 0 or 1 independently and ~ is the inverse
   of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t hi = (uint32_t) (imm >> 32);
  uint32_t lo = (uint32_t) imm;
  uint32_t expected;

  /* The low 29 significand bits must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 59-57 ('~~~') must be the inverse of bit 62 ('E').  */
  expected = (hi & 0x40000000) ? 0x40000000 : 0x38000000;
  if ((hi & 0x78000000) != expected)
    return false;

  /* The resulting float exponent Eeee_eeee must not be 1111_1111.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (lo >> 29);		/* 3 S bits.  */
  return true;
}
2608
2609 /* Return true if we should treat OPERAND as a double-precision
2610 floating-point operand rather than a single-precision one. */
2611 static bool
2612 double_precision_operand_p (const aarch64_opnd_info *operand)
2613 {
2614 /* Check for unsuffixed SVE registers, which are allowed
2615 for LDR and STR but not in instructions that require an
2616 immediate. We get better error messages if we arbitrarily
2617 pick one size, parse the immediate normally, and then
2618 report the match failure in the normal way. */
2619 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2620 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2621 }
2622
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   On success *CCP is advanced past the immediate; on failure it is left
   unchanged (though a syntax error is recorded via the global error
   state).

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  skip_past_char (&str, '#');

  /* NOTE(review): the "0x" check below is made on FPNUM, which has had
     leading whitespace skipped, while parsing resumes at STR, which has
     not — this assumes no whitespace precedes a hex constant here;
     TODO confirm.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit pattern to a 32-bit one, failing if that
	     would lose precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Reject a bare register name where an immediate is required.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let atof_ieee produce the single-precision
	 encoding in littlenum chunks.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2698
2699 /* Less-generic immediate-value read function with the possibility of loading
2700 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2701 instructions.
2702
2703 To prevent the expression parser from pushing a register name into the
2704 symbol table as an undefined symbol, a check is firstly done to find
2705 out whether STR is a register of type REG_TYPE followed by a comma or
2706 the end of line. Return FALSE if STR is such a register. */
2707
2708 static bool
2709 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2710 {
2711 char *ptr = *str;
2712
2713 if (reg_name_p (ptr, reg_type))
2714 {
2715 set_syntax_error (_("immediate operand required"));
2716 return false;
2717 }
2718
2719 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2720
2721 if (inst.reloc.exp.X_op == O_constant)
2722 *imm = inst.reloc.exp.X_add_number;
2723
2724 *str = ptr;
2725
2726 return true;
2727 }
2728
2729 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2730 if NEED_LIBOPCODES is non-zero, the fixup will need
2731 assistance from the libopcodes. */
2732
2733 static inline void
2734 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2735 const aarch64_opnd_info *operand,
2736 int need_libopcodes_p)
2737 {
2738 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2739 reloc->opnd = operand->type;
2740 if (need_libopcodes_p)
2741 reloc->need_libopcodes_p = 1;
2742 };
2743
2744 /* Return TRUE if the instruction needs to be fixed up later internally by
2745 the GAS; otherwise return FALSE. */
2746
2747 static inline bool
2748 aarch64_gas_internal_fixup_p (void)
2749 {
2750 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2751 }
2752
2753 /* Assign the immediate value to the relevant field in *OPERAND if
2754 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2755 needs an internal fixup in a later stage.
2756 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2757 IMM.VALUE that may get assigned with the constant. */
2758 static inline void
2759 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2760 aarch64_opnd_info *operand,
2761 int addr_off_p,
2762 int need_libopcodes_p,
2763 int skip_p)
2764 {
2765 if (reloc->exp.X_op == O_constant)
2766 {
2767 if (addr_off_p)
2768 operand->addr.offset.imm = reloc->exp.X_add_number;
2769 else
2770 operand->imm.value = reloc->exp.X_add_number;
2771 reloc->type = BFD_RELOC_UNUSED;
2772 }
2773 else
2774 {
2775 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2776 /* Tell libopcodes to ignore this operand or not. This is helpful
2777 when one of the operands needs to be fixed up later but we need
2778 libopcodes to check the other operands. */
2779 operand->skip = skip_p;
2780 }
2781 }
2782
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate. It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

/* N.B. the field order below is load-bearing: reloc_table initializes
   entries positionally.  */
struct reloc_table_entry
{
  /* Modifier name as written in assembler source, without the colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to emit for each class of consuming instruction.
     NOTE(review): a zero in one of these slots appears to mean the
     modifier is not accepted on that instruction class — confirm
     against the consumers of this table (outside this chunk).  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2802
2803 static struct reloc_table_entry reloc_table[] =
2804 {
2805 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2806 {"lo12", 0,
2807 0, /* adr_type */
2808 0,
2809 0,
2810 BFD_RELOC_AARCH64_ADD_LO12,
2811 BFD_RELOC_AARCH64_LDST_LO12,
2812 0},
2813
2814 /* Higher 21 bits of pc-relative page offset: ADRP */
2815 {"pg_hi21", 1,
2816 0, /* adr_type */
2817 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2818 0,
2819 0,
2820 0,
2821 0},
2822
2823 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2824 {"pg_hi21_nc", 1,
2825 0, /* adr_type */
2826 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2827 0,
2828 0,
2829 0,
2830 0},
2831
2832 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2833 {"abs_g0", 0,
2834 0, /* adr_type */
2835 0,
2836 BFD_RELOC_AARCH64_MOVW_G0,
2837 0,
2838 0,
2839 0},
2840
2841 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2842 {"abs_g0_s", 0,
2843 0, /* adr_type */
2844 0,
2845 BFD_RELOC_AARCH64_MOVW_G0_S,
2846 0,
2847 0,
2848 0},
2849
2850 /* Less significant bits 0-15 of address/value: MOVK, no check */
2851 {"abs_g0_nc", 0,
2852 0, /* adr_type */
2853 0,
2854 BFD_RELOC_AARCH64_MOVW_G0_NC,
2855 0,
2856 0,
2857 0},
2858
2859 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2860 {"abs_g1", 0,
2861 0, /* adr_type */
2862 0,
2863 BFD_RELOC_AARCH64_MOVW_G1,
2864 0,
2865 0,
2866 0},
2867
2868 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2869 {"abs_g1_s", 0,
2870 0, /* adr_type */
2871 0,
2872 BFD_RELOC_AARCH64_MOVW_G1_S,
2873 0,
2874 0,
2875 0},
2876
2877 /* Less significant bits 16-31 of address/value: MOVK, no check */
2878 {"abs_g1_nc", 0,
2879 0, /* adr_type */
2880 0,
2881 BFD_RELOC_AARCH64_MOVW_G1_NC,
2882 0,
2883 0,
2884 0},
2885
2886 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2887 {"abs_g2", 0,
2888 0, /* adr_type */
2889 0,
2890 BFD_RELOC_AARCH64_MOVW_G2,
2891 0,
2892 0,
2893 0},
2894
2895 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2896 {"abs_g2_s", 0,
2897 0, /* adr_type */
2898 0,
2899 BFD_RELOC_AARCH64_MOVW_G2_S,
2900 0,
2901 0,
2902 0},
2903
2904 /* Less significant bits 32-47 of address/value: MOVK, no check */
2905 {"abs_g2_nc", 0,
2906 0, /* adr_type */
2907 0,
2908 BFD_RELOC_AARCH64_MOVW_G2_NC,
2909 0,
2910 0,
2911 0},
2912
2913 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2914 {"abs_g3", 0,
2915 0, /* adr_type */
2916 0,
2917 BFD_RELOC_AARCH64_MOVW_G3,
2918 0,
2919 0,
2920 0},
2921
2922 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2923 {"prel_g0", 1,
2924 0, /* adr_type */
2925 0,
2926 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2927 0,
2928 0,
2929 0},
2930
2931 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2932 {"prel_g0_nc", 1,
2933 0, /* adr_type */
2934 0,
2935 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2936 0,
2937 0,
2938 0},
2939
2940 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2941 {"prel_g1", 1,
2942 0, /* adr_type */
2943 0,
2944 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2945 0,
2946 0,
2947 0},
2948
2949 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2950 {"prel_g1_nc", 1,
2951 0, /* adr_type */
2952 0,
2953 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2954 0,
2955 0,
2956 0},
2957
2958 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2959 {"prel_g2", 1,
2960 0, /* adr_type */
2961 0,
2962 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2963 0,
2964 0,
2965 0},
2966
2967 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2968 {"prel_g2_nc", 1,
2969 0, /* adr_type */
2970 0,
2971 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2972 0,
2973 0,
2974 0},
2975
2976 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2977 {"prel_g3", 1,
2978 0, /* adr_type */
2979 0,
2980 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2981 0,
2982 0,
2983 0},
2984
2985 /* Get to the page containing GOT entry for a symbol. */
2986 {"got", 1,
2987 0, /* adr_type */
2988 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2989 0,
2990 0,
2991 0,
2992 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2993
2994 /* 12 bit offset into the page containing GOT entry for that symbol. */
2995 {"got_lo12", 0,
2996 0, /* adr_type */
2997 0,
2998 0,
2999 0,
3000 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
3001 0},
3002
3003 /* 0-15 bits of address/value: MOVk, no check. */
3004 {"gotoff_g0_nc", 0,
3005 0, /* adr_type */
3006 0,
3007 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
3008 0,
3009 0,
3010 0},
3011
3012 /* Most significant bits 16-31 of address/value: MOVZ. */
3013 {"gotoff_g1", 0,
3014 0, /* adr_type */
3015 0,
3016 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
3017 0,
3018 0,
3019 0},
3020
3021 /* 15 bit offset into the page containing GOT entry for that symbol. */
3022 {"gotoff_lo15", 0,
3023 0, /* adr_type */
3024 0,
3025 0,
3026 0,
3027 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
3028 0},
3029
3030 /* Get to the page containing GOT TLS entry for a symbol */
3031 {"gottprel_g0_nc", 0,
3032 0, /* adr_type */
3033 0,
3034 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
3035 0,
3036 0,
3037 0},
3038
3039 /* Get to the page containing GOT TLS entry for a symbol */
3040 {"gottprel_g1", 0,
3041 0, /* adr_type */
3042 0,
3043 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
3044 0,
3045 0,
3046 0},
3047
3048 /* Get to the page containing GOT TLS entry for a symbol */
3049 {"tlsgd", 0,
3050 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
3051 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
3052 0,
3053 0,
3054 0,
3055 0},
3056
3057 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3058 {"tlsgd_lo12", 0,
3059 0, /* adr_type */
3060 0,
3061 0,
3062 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
3063 0,
3064 0},
3065
3066 /* Lower 16 bits address/value: MOVk. */
3067 {"tlsgd_g0_nc", 0,
3068 0, /* adr_type */
3069 0,
3070 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
3071 0,
3072 0,
3073 0},
3074
3075 /* Most significant bits 16-31 of address/value: MOVZ. */
3076 {"tlsgd_g1", 0,
3077 0, /* adr_type */
3078 0,
3079 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
3080 0,
3081 0,
3082 0},
3083
3084 /* Get to the page containing GOT TLS entry for a symbol */
3085 {"tlsdesc", 0,
3086 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
3087 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
3088 0,
3089 0,
3090 0,
3091 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
3092
3093 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3094 {"tlsdesc_lo12", 0,
3095 0, /* adr_type */
3096 0,
3097 0,
3098 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
3099 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
3100 0},
3101
3102 /* Get to the page containing GOT TLS entry for a symbol.
3103 The same as GD, we allocate two consecutive GOT slots
3104 for module index and module offset, the only difference
3105 with GD is the module offset should be initialized to
3106 zero without any outstanding runtime relocation. */
3107 {"tlsldm", 0,
3108 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
3109 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
3110 0,
3111 0,
3112 0,
3113 0},
3114
3115 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3116 {"tlsldm_lo12_nc", 0,
3117 0, /* adr_type */
3118 0,
3119 0,
3120 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
3121 0,
3122 0},
3123
3124 /* 12 bit offset into the module TLS base address. */
3125 {"dtprel_lo12", 0,
3126 0, /* adr_type */
3127 0,
3128 0,
3129 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
3130 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
3131 0},
3132
3133 /* Same as dtprel_lo12, no overflow check. */
3134 {"dtprel_lo12_nc", 0,
3135 0, /* adr_type */
3136 0,
3137 0,
3138 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
3139 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
3140 0},
3141
3142 /* bits[23:12] of offset to the module TLS base address. */
3143 {"dtprel_hi12", 0,
3144 0, /* adr_type */
3145 0,
3146 0,
3147 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
3148 0,
3149 0},
3150
3151 /* bits[15:0] of offset to the module TLS base address. */
3152 {"dtprel_g0", 0,
3153 0, /* adr_type */
3154 0,
3155 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3156 0,
3157 0,
3158 0},
3159
3160 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3161 {"dtprel_g0_nc", 0,
3162 0, /* adr_type */
3163 0,
3164 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3165 0,
3166 0,
3167 0},
3168
3169 /* bits[31:16] of offset to the module TLS base address. */
3170 {"dtprel_g1", 0,
3171 0, /* adr_type */
3172 0,
3173 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3174 0,
3175 0,
3176 0},
3177
3178 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3179 {"dtprel_g1_nc", 0,
3180 0, /* adr_type */
3181 0,
3182 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3183 0,
3184 0,
3185 0},
3186
3187 /* bits[47:32] of offset to the module TLS base address. */
3188 {"dtprel_g2", 0,
3189 0, /* adr_type */
3190 0,
3191 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3192 0,
3193 0,
3194 0},
3195
3196 /* Lower 16 bit offset into GOT entry for a symbol */
3197 {"tlsdesc_off_g0_nc", 0,
3198 0, /* adr_type */
3199 0,
3200 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3201 0,
3202 0,
3203 0},
3204
3205 /* Higher 16 bit offset into GOT entry for a symbol */
3206 {"tlsdesc_off_g1", 0,
3207 0, /* adr_type */
3208 0,
3209 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3210 0,
3211 0,
3212 0},
3213
3214 /* Get to the page containing GOT TLS entry for a symbol */
3215 {"gottprel", 0,
3216 0, /* adr_type */
3217 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3218 0,
3219 0,
3220 0,
3221 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3222
3223 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3224 {"gottprel_lo12", 0,
3225 0, /* adr_type */
3226 0,
3227 0,
3228 0,
3229 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3230 0},
3231
3232 /* Get tp offset for a symbol. */
3233 {"tprel", 0,
3234 0, /* adr_type */
3235 0,
3236 0,
3237 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3238 0,
3239 0},
3240
3241 /* Get tp offset for a symbol. */
3242 {"tprel_lo12", 0,
3243 0, /* adr_type */
3244 0,
3245 0,
3246 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3247 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3248 0},
3249
3250 /* Get tp offset for a symbol. */
3251 {"tprel_hi12", 0,
3252 0, /* adr_type */
3253 0,
3254 0,
3255 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3256 0,
3257 0},
3258
3259 /* Get tp offset for a symbol. */
3260 {"tprel_lo12_nc", 0,
3261 0, /* adr_type */
3262 0,
3263 0,
3264 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3265 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3266 0},
3267
3268 /* Most significant bits 32-47 of address/value: MOVZ. */
3269 {"tprel_g2", 0,
3270 0, /* adr_type */
3271 0,
3272 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3273 0,
3274 0,
3275 0},
3276
3277 /* Most significant bits 16-31 of address/value: MOVZ. */
3278 {"tprel_g1", 0,
3279 0, /* adr_type */
3280 0,
3281 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3282 0,
3283 0,
3284 0},
3285
3286 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3287 {"tprel_g1_nc", 0,
3288 0, /* adr_type */
3289 0,
3290 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3291 0,
3292 0,
3293 0},
3294
3295 /* Most significant bits 0-15 of address/value: MOVZ. */
3296 {"tprel_g0", 0,
3297 0, /* adr_type */
3298 0,
3299 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3300 0,
3301 0,
3302 0},
3303
3304 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3305 {"tprel_g0_nc", 0,
3306 0, /* adr_type */
3307 0,
3308 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3309 0,
3310 0,
3311 0},
3312
3313 /* 15bit offset from got entry to base address of GOT table. */
3314 {"gotpage_lo15", 0,
3315 0,
3316 0,
3317 0,
3318 0,
3319 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3320 0},
3321
3322 /* 14bit offset from got entry to base address of GOT table. */
3323 {"gotpage_lo14", 0,
3324 0,
3325 0,
3326 0,
3327 0,
3328 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3329 0},
3330 };
3331
3332 /* Given the address of a pointer pointing to the textual name of a
3333 relocation as may appear in assembler source, attempt to find its
3334 details in reloc_table. The pointer will be updated to the character
3335 after the trailing colon. On failure, NULL will be returned;
3336 otherwise return the reloc_table_entry. */
3337
3338 static struct reloc_table_entry *
3339 find_reloc_table_entry (char **str)
3340 {
3341 unsigned int i;
3342 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3343 {
3344 int length = strlen (reloc_table[i].name);
3345
3346 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3347 && (*str)[length] == ':')
3348 {
3349 *str += (length + 1);
3350 return &reloc_table[i];
3351 }
3352 }
3353
3354 return NULL;
3355 }
3356
3357 /* Returns 0 if the relocation should never be forced,
3358 1 if the relocation must be forced, and -1 if either
3359 result is OK. */
3360
static signed int
aarch64_force_reloc (unsigned int type)
{
  /* Classify the BFD reloc TYPE into one of three buckets:
     0  -> never force (resolve internally),
     1  -> always force (leave for the linker),
     -1 -> no opinion; the caller applies the generic rule.  */
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT-, TLS- and PC-relative low-part relocations whose final
       values depend on the linker's placement decisions.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No inherent requirement; defer to the generic policy.  */
      return -1;
    }
}
3459
3460 int
3461 aarch64_force_relocation (struct fix *fixp)
3462 {
3463 int res = aarch64_force_reloc (fixp->fx_r_type);
3464
3465 if (res == -1)
3466 return generic_force_reloc (fixp);
3467 return res;
3468 }
3469
/* Mode argument to parse_shift and parse_shifter_operand, selecting
   which shift/extend operators (and which syntactic forms) are legal
   in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiplier)  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3484
3485 /* Parse a <shift> operator on an AArch64 data processing instruction.
3486 Return TRUE on success; otherwise return FALSE. */
3487 static bool
3488 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3489 {
3490 const struct aarch64_name_value_pair *shift_op;
3491 enum aarch64_modifier_kind kind;
3492 expressionS exp;
3493 int exp_has_prefix;
3494 char *s = *str;
3495 char *p = s;
3496
3497 for (p = *str; ISALPHA (*p); p++)
3498 ;
3499
3500 if (p == *str)
3501 {
3502 set_syntax_error (_("shift expression expected"));
3503 return false;
3504 }
3505
3506 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3507
3508 if (shift_op == NULL)
3509 {
3510 set_syntax_error (_("shift operator expected"));
3511 return false;
3512 }
3513
3514 kind = aarch64_get_operand_modifier (shift_op);
3515
3516 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3517 {
3518 set_syntax_error (_("invalid use of 'MSL'"));
3519 return false;
3520 }
3521
3522 if (kind == AARCH64_MOD_MUL
3523 && mode != SHIFTED_MUL
3524 && mode != SHIFTED_MUL_VL)
3525 {
3526 set_syntax_error (_("invalid use of 'MUL'"));
3527 return false;
3528 }
3529
3530 switch (mode)
3531 {
3532 case SHIFTED_LOGIC_IMM:
3533 if (aarch64_extend_operator_p (kind))
3534 {
3535 set_syntax_error (_("extending shift is not permitted"));
3536 return false;
3537 }
3538 break;
3539
3540 case SHIFTED_ARITH_IMM:
3541 if (kind == AARCH64_MOD_ROR)
3542 {
3543 set_syntax_error (_("'ROR' shift is not permitted"));
3544 return false;
3545 }
3546 break;
3547
3548 case SHIFTED_LSL:
3549 if (kind != AARCH64_MOD_LSL)
3550 {
3551 set_syntax_error (_("only 'LSL' shift is permitted"));
3552 return false;
3553 }
3554 break;
3555
3556 case SHIFTED_MUL:
3557 if (kind != AARCH64_MOD_MUL)
3558 {
3559 set_syntax_error (_("only 'MUL' is permitted"));
3560 return false;
3561 }
3562 break;
3563
3564 case SHIFTED_MUL_VL:
3565 /* "MUL VL" consists of two separate tokens. Require the first
3566 token to be "MUL" and look for a following "VL". */
3567 if (kind == AARCH64_MOD_MUL)
3568 {
3569 skip_whitespace (p);
3570 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3571 {
3572 p += 2;
3573 kind = AARCH64_MOD_MUL_VL;
3574 break;
3575 }
3576 }
3577 set_syntax_error (_("only 'MUL VL' is permitted"));
3578 return false;
3579
3580 case SHIFTED_REG_OFFSET:
3581 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3582 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3583 {
3584 set_fatal_syntax_error
3585 (_("invalid shift for the register offset addressing mode"));
3586 return false;
3587 }
3588 break;
3589
3590 case SHIFTED_LSL_MSL:
3591 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3592 {
3593 set_syntax_error (_("invalid shift operator"));
3594 return false;
3595 }
3596 break;
3597
3598 default:
3599 abort ();
3600 }
3601
3602 /* Whitespace can appear here if the next thing is a bare digit. */
3603 skip_whitespace (p);
3604
3605 /* Parse shift amount. */
3606 exp_has_prefix = 0;
3607 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3608 exp.X_op = O_absent;
3609 else
3610 {
3611 if (is_immediate_prefix (*p))
3612 {
3613 p++;
3614 exp_has_prefix = 1;
3615 }
3616 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3617 }
3618 if (kind == AARCH64_MOD_MUL_VL)
3619 /* For consistency, give MUL VL the same shift amount as an implicit
3620 MUL #1. */
3621 operand->shifter.amount = 1;
3622 else if (exp.X_op == O_absent)
3623 {
3624 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3625 {
3626 set_syntax_error (_("missing shift amount"));
3627 return false;
3628 }
3629 operand->shifter.amount = 0;
3630 }
3631 else if (exp.X_op != O_constant)
3632 {
3633 set_syntax_error (_("constant shift amount required"));
3634 return false;
3635 }
3636 /* For parsing purposes, MUL #n has no inherent range. The range
3637 depends on the operand and will be checked by operand-specific
3638 routines. */
3639 else if (kind != AARCH64_MOD_MUL
3640 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3641 {
3642 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3643 return false;
3644 }
3645 else
3646 {
3647 operand->shifter.amount = exp.X_add_number;
3648 operand->shifter.amount_present = 1;
3649 }
3650
3651 operand->shifter.operator_present = 1;
3652 operand->shifter.kind = kind;
3653
3654 *str = p;
3655 return true;
3656 }
3657
3658 /* Parse a <shifter_operand> for a data processing instruction:
3659
3660 #<immediate>
3661 #<immediate>, LSL #imm
3662
3663 Validation of immediate operands is deferred to md_apply_fix.
3664
3665 Return TRUE on success; otherwise return FALSE. */
3666
static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic and logical immediate forms are handled here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Do not accept any shifter for logical immediate values: if a comma
     is followed by a parsable shift it is an error.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3699
3700 /* Parse a <shifter_operand> for a data processing instruction:
3701
3702 <Rm>
3703 <Rm>, <shift>
3704 #<immediate>
3705 #<immediate>, LSL #imm
3706
3707 where <shift> is handled by parse_shift above, and the last two
3708 cases are handled by the function above.
3709
3710 Validation of immediate operands is deferred to md_apply_fix.
3711
3712 Return TRUE on success; otherwise return FALSE. */
3713
3714 static bool
3715 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3716 enum parse_shift_mode mode)
3717 {
3718 const reg_entry *reg;
3719 aarch64_opnd_qualifier_t qualifier;
3720 enum aarch64_operand_class opd_class
3721 = aarch64_get_operand_class (operand->type);
3722
3723 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3724 if (reg)
3725 {
3726 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3727 {
3728 set_syntax_error (_("unexpected register in the immediate operand"));
3729 return false;
3730 }
3731
3732 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3733 {
3734 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3735 return false;
3736 }
3737
3738 operand->reg.regno = reg->number;
3739 operand->qualifier = qualifier;
3740
3741 /* Accept optional shift operation on register. */
3742 if (! skip_past_comma (str))
3743 return true;
3744
3745 if (! parse_shift (str, operand, mode))
3746 return false;
3747
3748 return true;
3749 }
3750 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3751 {
3752 set_syntax_error
3753 (_("integer register expected in the extended/shifted operand "
3754 "register"));
3755 return false;
3756 }
3757
3758 /* We have a shifted immediate variable. */
3759 return parse_shifter_operand_imm (str, operand, mode);
3760 }
3761
3762 /* Return TRUE on success; return FALSE otherwise. */
3763
static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' (if present) and the ':'.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must have an ADD-instruction variant.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3823
3824 /* Parse all forms of an address expression. Information is written
3825 to *OPERAND and/or inst.reloc.
3826
3827 The A64 instruction set has the following addressing modes:
3828
3829 Offset
3830 [base] // in SIMD ld/st structure
3831 [base{,#0}] // in ld/st exclusive
3832 [base{,#imm}]
3833 [base,Xm{,LSL #imm}]
3834 [base,Xm,SXTX {#imm}]
3835 [base,Wm,(S|U)XTW {#imm}]
3836 Pre-indexed
3837 [base]! // in ldraa/ldrab exclusive
3838 [base,#imm]!
3839 Post-indexed
3840 [base],#imm
3841 [base],Xm // in SIMD ld/st structure
3842 PC-relative (literal)
3843 label
3844 SVE:
3845 [base,#imm,MUL VL]
3846 [base,Zm.D{,LSL #imm}]
3847 [base,Zm.S,(S|U)XTW {#imm}]
3848 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3849 [Zn.S,#imm]
3850 [Zn.D,#imm]
3851 [Zn.S{, Xm}]
3852 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3853 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3854 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3855
3856 (As a convenience, the notation "=immediate" is permitted in conjunction
3857 with the pc-relative literal load instructions to automatically place an
3858 immediate value or symbolic address in a nearby literal pool and generate
3859 a hidden label which references it.)
3860
3861 Upon a successful parsing, the address structure in *OPERAND will be
3862 filled in the following way:
3863
3864 .base_regno = <base>
3865 .offset.is_reg // 1 if the offset is a register
3866 .offset.imm = <imm>
3867 .offset.regno = <Rm>
3868
3869 For different addressing modes defined in the A64 ISA:
3870
3871 Offset
3872 .pcrel=0; .preind=1; .postind=0; .writeback=0
3873 Pre-indexed
3874 .pcrel=0; .preind=1; .postind=0; .writeback=1
3875 Post-indexed
3876 .pcrel=0; .preind=0; .postind=1; .writeback=1
3877 PC-relative (literal)
3878 .pcrel=1; .preind=1; .postind=0; .writeback=0
3879
3880 The shift/extension information, if any, will be stored in .shifter.
3881 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3882 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3883 corresponding register.
3884
3885 BASE_TYPE says which types of base register should be accepted and
3886 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3887 is the type of shifter that is allowed for immediate offsets,
3888 or SHIFTED_NONE if none.
3889
3890 In all other respects, it is the caller's responsibility to check
3891 for addressing modes not supported by the instruction, and to set
3892 inst.reloc.type. */
3893
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': the operand is "=immediate" or a (possibly
	 reloc-modified) label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr_type variant; everything else here is a
	     PC-relative literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base token looked like a register name, so
     the error path below can pick the more helpful diagnostic.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset
		 register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset must have matching element sizes, with
		 the SVE2 vector-plus-scalar [Zn.S, Xm] form as the one
		 allowed exception.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW requires a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4200
4201 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4202 on success. */
4203 static bool
4204 parse_address (char **str, aarch64_opnd_info *operand)
4205 {
4206 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4207 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4208 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4209 }
4210
4211 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4212 The arguments have the same meaning as for parse_address_main.
4213 Return TRUE on success. */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE addresses additionally admit vector base/offset registers and
     a "MUL VL" multiplier on immediate offsets.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4223
4224 /* Parse a register X0-X30. The register must be 64-bit and register 31
4225 is unallocated. */
4226 static bool
4227 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4228 {
4229 const reg_entry *reg = parse_reg (str);
4230 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4231 {
4232 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4233 return false;
4234 }
4235 operand->reg.regno = reg->number;
4236 operand->qualifier = AARCH64_OPND_QLF_X;
4237 return true;
4238 }
4239
4240 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4241 Return TRUE on success; otherwise return FALSE. */
4242 static bool
4243 parse_half (char **str, int *internal_fixup_p)
4244 {
4245 char *p = *str;
4246
4247 skip_past_char (&p, '#');
4248
4249 gas_assert (internal_fixup_p);
4250 *internal_fixup_p = 0;
4251
4252 if (*p == ':')
4253 {
4254 struct reloc_table_entry *entry;
4255
4256 /* Try to parse a relocation. Anything else is an error. */
4257 ++p;
4258
4259 if (!(entry = find_reloc_table_entry (&p)))
4260 {
4261 set_syntax_error (_("unknown relocation modifier"));
4262 return false;
4263 }
4264
4265 if (entry->movw_type == 0)
4266 {
4267 set_syntax_error
4268 (_("this relocation modifier is not allowed on this instruction"));
4269 return false;
4270 }
4271
4272 inst.reloc.type = entry->movw_type;
4273 }
4274 else
4275 *internal_fixup_p = 1;
4276
4277 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4278 return false;
4279
4280 *str = p;
4281 return true;
4282 }
4283
4284 /* Parse an operand for an ADRP instruction:
4285 ADRP <Xd>, <label>
4286 Return TRUE on success; otherwise return FALSE. */
4287
static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must have an ADRP-instruction variant.  */
      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    /* No modifier: use the default page-relative ADRP relocation.  */
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  inst.reloc.pc_rel = 1;
  /* The remainder of the operand is the target expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;
  *str = p;
  return true;
}
4324
4325 /* Miscellaneous. */
4326
4327 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4328 of SIZE tokens in which index I gives the token for field value I,
4329 or is null if field value I is invalid. If the symbolic operand
4330 can also be given as a 0-based integer, REG_TYPE says which register
4331 names should be treated as registers rather than as symbolic immediates
4332 while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
4333
4334 Return true on success, moving *STR past the operand and storing the
4335 field value in *VAL. */
4336
4337 static int
4338 parse_enum_string (char **str, int64_t *val, const char *const *array,
4339 size_t size, aarch64_reg_type reg_type)
4340 {
4341 expressionS exp;
4342 char *p, *q;
4343 size_t i;
4344
4345 /* Match C-like tokens. */
4346 p = q = *str;
4347 while (ISALNUM (*q))
4348 q++;
4349
4350 for (i = 0; i < size; ++i)
4351 if (array[i]
4352 && strncasecmp (array[i], p, q - p) == 0
4353 && array[i][q - p] == 0)
4354 {
4355 *val = i;
4356 *str = q;
4357 return true;
4358 }
4359
4360 if (reg_type == REG_TYPE_MAX)
4361 return false;
4362
4363 if (!parse_immediate_expression (&p, &exp, reg_type))
4364 return false;
4365
4366 if (exp.X_op == O_constant
4367 && (uint64_t) exp.X_add_number < size)
4368 {
4369 *val = exp.X_add_number;
4370 *str = p;
4371 return true;
4372 }
4373
4374 /* Use the default error for this operand. */
4375 return false;
4376 }
4377
4378 /* Parse an option for a preload instruction. Returns the encoding for the
4379 option, or PARSE_FAIL. */
4380
4381 static int
4382 parse_pldop (char **str)
4383 {
4384 char *p, *q;
4385 const struct aarch64_name_value_pair *o;
4386
4387 p = q = *str;
4388 while (ISALNUM (*q))
4389 q++;
4390
4391 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4392 if (!o)
4393 return PARSE_FAIL;
4394
4395 *str = q;
4396 return o->value;
4397 }
4398
4399 /* Parse an option for a barrier instruction. Returns the encoding for the
4400 option, or PARSE_FAIL. */
4401
4402 static int
4403 parse_barrier (char **str)
4404 {
4405 char *p, *q;
4406 const struct aarch64_name_value_pair *o;
4407
4408 p = q = *str;
4409 while (ISALPHA (*q))
4410 q++;
4411
4412 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4413 if (!o)
4414 return PARSE_FAIL;
4415
4416 *str = q;
4417 return o->value;
4418 }
4419
4420 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4421 return 0 if successful. Otherwise return PARSE_FAIL. */
4422
4423 static int
4424 parse_barrier_psb (char **str,
4425 const struct aarch64_name_value_pair ** hint_opt)
4426 {
4427 char *p, *q;
4428 const struct aarch64_name_value_pair *o;
4429
4430 p = q = *str;
4431 while (ISALPHA (*q))
4432 q++;
4433
4434 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4435 if (!o)
4436 {
4437 set_fatal_syntax_error
4438 ( _("unknown or missing option to PSB/TSB"));
4439 return PARSE_FAIL;
4440 }
4441
4442 if (o->value != 0x11)
4443 {
4444 /* PSB only accepts option name 'CSYNC'. */
4445 set_syntax_error
4446 (_("the specified option is not accepted for PSB/TSB"));
4447 return PARSE_FAIL;
4448 }
4449
4450 *str = q;
4451 *hint_opt = o;
4452 return 0;
4453 }
4454
4455 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4456 return 0 if successful. Otherwise return PARSE_FAIL. */
4457
4458 static int
4459 parse_bti_operand (char **str,
4460 const struct aarch64_name_value_pair ** hint_opt)
4461 {
4462 char *p, *q;
4463 const struct aarch64_name_value_pair *o;
4464
4465 p = q = *str;
4466 while (ISALPHA (*q))
4467 q++;
4468
4469 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4470 if (!o)
4471 {
4472 set_fatal_syntax_error
4473 ( _("unknown option to BTI"));
4474 return PARSE_FAIL;
4475 }
4476
4477 switch (o->value)
4478 {
4479 /* Valid BTI operands. */
4480 case HINT_OPD_C:
4481 case HINT_OPD_J:
4482 case HINT_OPD_JC:
4483 break;
4484
4485 default:
4486 set_syntax_error
4487 (_("unknown option to BTI"));
4488 return PARSE_FAIL;
4489 }
4490
4491 *str = q;
4492 *hint_opt = o;
4493 return 0;
4494 }
4495
4496 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4497 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4498 on failure. Format:
4499
4500 REG_TYPE.QUALIFIER
4501
4502 Side effect: Update STR with current parse position of success.
4503
4504 FLAGS is as for parse_typed_reg. */
4505
4506 static const reg_entry *
4507 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4508 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4509 {
4510 struct vector_type_el vectype;
4511 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4512 PTR_FULL_REG | flags);
4513 if (!reg)
4514 return NULL;
4515
4516 if (vectype.type == NT_invtype)
4517 *qualifier = AARCH64_OPND_QLF_NIL;
4518 else
4519 {
4520 *qualifier = vectype_to_qualifier (&vectype);
4521 if (*qualifier == AARCH64_OPND_QLF_NIL)
4522 return NULL;
4523 }
4524
4525 return reg;
4526 }
4527
4528 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4529
4530 #<imm>
4531 <imm>
4532
4533 Function return TRUE if immediate was found, or FALSE.
4534 */
4535 static bool
4536 parse_sme_immediate (char **str, int64_t *imm)
4537 {
4538 int64_t val;
4539 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4540 return false;
4541
4542 *imm = val;
4543 return true;
4544 }
4545
/* Parse an index with a selection register and an immediate offset:

     [<Wv>, <imm>]
     [<Wv>, #<imm>]

   optionally followed by an offset range ":<imm2>" and/or a vector group
   size ", vgx2" or ", vgx4".

   Return true on success, populating OPND with the parsed index.  */

static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* Optional "<imm>:<imm2>" range; the second offset must be strictly
     greater than the first.  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      /* COUNTM1 holds the length of the range minus one.  */
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* Optional vector group size; 0 records that none was given.  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4636
4637 /* Parse a register of type REG_TYPE that might have an element type
4638 qualifier and that is indexed by two values: a 32-bit register,
4639 followed by an immediate. The ranges of the register and the
4640 immediate vary by opcode and are checked in libopcodes.
4641
4642 Return true on success, populating OPND with information about
4643 the operand and setting QUALIFIER to the register qualifier.
4644
4645 Field format examples:
4646
4647 <Pm>.<T>[<Wv>< #<imm>]
4648 ZA[<Wv>, #<imm>]
4649 <ZAn><HV>.<T>[<Wv>, #<imm>]
4650
4651 FLAGS is as for parse_typed_reg. */
4652
4653 static bool
4654 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4655 struct aarch64_indexed_za *opnd,
4656 aarch64_opnd_qualifier_t *qualifier,
4657 unsigned int flags)
4658 {
4659 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4660 if (!reg)
4661 return false;
4662
4663 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4664 opnd->regno = reg->number;
4665
4666 return parse_sme_za_index (str, opnd);
4667 }
4668
4669 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4670 operand. */
4671
4672 static bool
4673 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4674 struct aarch64_indexed_za *opnd,
4675 aarch64_opnd_qualifier_t *qualifier)
4676 {
4677 if (!skip_past_char (str, '{'))
4678 {
4679 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4680 return false;
4681 }
4682
4683 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4684 PTR_IN_REGLIST))
4685 return false;
4686
4687 if (!skip_past_char (str, '}'))
4688 {
4689 set_syntax_error (_("expected '}'"));
4690 return false;
4691 }
4692
4693 return true;
4694 }
4695
4696 /* Parse list of up to eight 64-bit element tile names separated by commas in
4697 SME's ZERO instruction:
4698
4699 ZERO { <mask> }
4700
4701 Function returns <mask>:
4702
4703 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4704 */
4705 static int
4706 parse_sme_zero_mask(char **str)
4707 {
4708 char *q;
4709 int mask;
4710 aarch64_opnd_qualifier_t qualifier;
4711 unsigned int ptr_flags = PTR_IN_REGLIST;
4712
4713 mask = 0x00;
4714 q = *str;
4715 do
4716 {
4717 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4718 &qualifier, ptr_flags);
4719 if (!reg)
4720 return PARSE_FAIL;
4721
4722 if (reg->type == REG_TYPE_ZA)
4723 {
4724 if (qualifier != AARCH64_OPND_QLF_NIL)
4725 {
4726 set_syntax_error ("ZA should not have a size suffix");
4727 return PARSE_FAIL;
4728 }
4729 /* { ZA } is assembled as all-ones immediate. */
4730 mask = 0xff;
4731 }
4732 else
4733 {
4734 int regno = reg->number;
4735 if (qualifier == AARCH64_OPND_QLF_S_B)
4736 {
4737 /* { ZA0.B } is assembled as all-ones immediate. */
4738 mask = 0xff;
4739 }
4740 else if (qualifier == AARCH64_OPND_QLF_S_H)
4741 mask |= 0x55 << regno;
4742 else if (qualifier == AARCH64_OPND_QLF_S_S)
4743 mask |= 0x11 << regno;
4744 else if (qualifier == AARCH64_OPND_QLF_S_D)
4745 mask |= 0x01 << regno;
4746 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4747 {
4748 set_syntax_error (_("ZA tile masks do not operate at .Q"
4749 " granularity"));
4750 return PARSE_FAIL;
4751 }
4752 else if (qualifier == AARCH64_OPND_QLF_NIL)
4753 {
4754 set_syntax_error (_("missing ZA tile size"));
4755 return PARSE_FAIL;
4756 }
4757 else
4758 {
4759 set_syntax_error (_("invalid ZA tile"));
4760 return PARSE_FAIL;
4761 }
4762 }
4763 ptr_flags |= PTR_GOOD_MATCH;
4764 }
4765 while (skip_past_char (&q, ','));
4766
4767 *str = q;
4768 return mask;
4769 }
4770
4771 /* Wraps in curly braces <mask> operand ZERO instruction:
4772
4773 ZERO { <mask> }
4774
4775 Function returns value of <mask> bit-field.
4776 */
4777 static int
4778 parse_sme_list_of_64bit_tiles (char **str)
4779 {
4780 int regno;
4781
4782 if (!skip_past_char (str, '{'))
4783 {
4784 set_syntax_error (_("expected '{'"));
4785 return PARSE_FAIL;
4786 }
4787
4788 /* Empty <mask> list is an all-zeros immediate. */
4789 if (!skip_past_char (str, '}'))
4790 {
4791 regno = parse_sme_zero_mask (str);
4792 if (regno == PARSE_FAIL)
4793 return PARSE_FAIL;
4794
4795 if (!skip_past_char (str, '}'))
4796 {
4797 set_syntax_error (_("expected '}'"));
4798 return PARSE_FAIL;
4799 }
4800 }
4801 else
4802 regno = 0x00;
4803
4804 return regno;
4805 }
4806
4807 /* Parse streaming mode operand for SMSTART and SMSTOP.
4808
4809 {SM | ZA}
4810
4811 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4812 */
4813 static int
4814 parse_sme_sm_za (char **str)
4815 {
4816 char *p, *q;
4817
4818 p = q = *str;
4819 while (ISALPHA (*q))
4820 q++;
4821
4822 if ((q - p != 2)
4823 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4824 {
4825 set_syntax_error (_("expected SM or ZA operand"));
4826 return PARSE_FAIL;
4827 }
4828
4829 *str = q;
4830 return TOLOWER (p[0]);
4831 }
4832
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, *FLAGS is set to the register's flag bits
   (0 for an implementation-defined register).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the first
     character that cannot be part of a register name; the copy is silently
     truncated if it would overflow BUF.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields: op0 at bit 14, op1 at 11, CRn at 7, CRm at 3
	     and op2 at 0.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known register: diagnose unsupported and deprecated names, but
	 still return the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags,
					       &o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4907
4908 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4909 for the option, or NULL. */
4910
4911 static const aarch64_sys_ins_reg *
4912 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4913 {
4914 char *p, *q;
4915 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4916 const aarch64_sys_ins_reg *o;
4917
4918 p = buf;
4919 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4920 if (p < buf + (sizeof (buf) - 1))
4921 *p++ = TOLOWER (*q);
4922 *p = '\0';
4923
4924 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4925 valid system register. This is enforced by construction of the hash
4926 table. */
4927 if (p - buf != q - *str)
4928 return NULL;
4929
4930 o = str_hash_find (sys_ins_regs, buf);
4931 if (!o)
4932 return NULL;
4933
4934 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4935 o->name, o->value, o->flags, 0))
4936 as_bad (_("selected processor does not support system register "
4937 "name '%s'"), buf);
4938 if (aarch64_sys_reg_deprecated_p (o->flags))
4939 as_warn (_("system register name '%s' is deprecated and may be "
4940 "removed in a future release"), buf);
4941
4942 *str = q;
4943 return o;
4944 }
4945 \f
/* Parsing helper macros.  Each one jumps to the enclosing function's
   "failure" label when its operand cannot be parsed, so they may only be
   used where such a label (and the referenced locals) are in scope.  */

/* Skip the literal character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into REG, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

/* Parse an integer or FP register of type REG_TYPE, recording its number
   and inherent qualifier in INFO, or fail.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and check that it lies within
   [MIN, MAX], or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the names in ARRAY into VAL; a matching immediate is also
   accepted (see parse_enum_string), or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse one of the names in ARRAY into VAL; REG_TYPE_MAX suppresses the
   immediate fallback, so only names are accepted, or fail.  */
#define po_strict_enum_or_fail(array) do {			\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), REG_TYPE_MAX))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yielded false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
5000 \f
/* A primitive log calculator: return floor(log2(N)) for N > 1,
   and 0 for N of 0 or 1.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int log = 0;
  for (; n > 1; n >>= 1)
    log++;
  return log;
}
5014
/* Encode the 12-bit imm field of Add/sub immediate: the value occupies
   bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  return value << 10;
}
5021
/* Encode the shift amount field of Add/sub immediate; the field starts
   at bit 22 of the instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t amount)
{
  return amount << 22;
}
5028
5029
/* Encode the imm field of an Adr instruction: the low two bits go to
   [30:29] (immlo) and bits [20:2] go to [23:5] (immhi).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;
  uint32_t immhi = ((imm >> 2) & 0x7ffff) << 5;
  return immlo | immhi;
}
5037
/* Encode the immediate field of a Move wide immediate instruction;
   the field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  return value << 5;
}
5044
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked to 26 bits and placed at bit 0.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x3ffffff;
}
5051
/* Encode the 19-bit offset of conditional branch and compare & branch;
   the offset is masked to 19 bits and placed at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5058
/* Encode the 19-bit offset of an ld literal instruction; the offset is
   masked to 19 bits and placed at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5065
/* Encode the 14-bit offset of test & branch; the offset is masked to
   14 bits and placed at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
5072
/* Encode the 16-bit imm field of svc/hvc/smc; the field starts at
   bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t value)
{
  return value << 5;
}
5079
/* Reencode add(s) to sub(s), or sub(s) to add(s), by flipping bit 30
   of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000u;
}
5086
/* Turn a MOVZ/MOVN opcode into MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
5092
/* Turn a MOVZ/MOVN opcode into MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
5098
5099 /* Overall per-instruction processing. */
5100
5101 /* We need to be able to fix up arbitrary expressions in some statements.
5102 This is so that we can handle symbols that are an arbitrary distance from
5103 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5104 which returns part of an address in a form which will be valid for
5105 a data instruction. We do this by pushing the expression into a symbol
5106 in the expr_section, and creating a fix for that. */
5107
5108 static fixS *
5109 fix_new_aarch64 (fragS * frag,
5110 int where,
5111 short int size,
5112 expressionS * exp,
5113 int pc_rel,
5114 int reloc)
5115 {
5116 fixS *new_fix;
5117
5118 switch (exp->X_op)
5119 {
5120 case O_constant:
5121 case O_symbol:
5122 case O_add:
5123 case O_subtract:
5124 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5125 break;
5126
5127 default:
5128 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5129 pc_rel, reloc);
5130 break;
5131 }
5132 return new_fix;
5133 }
5134 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind, so it must be kept in sync with
   that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5163
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the error-kind enum being ordered
     from least to most severe; the asserts spot-check that ordering.
     NOTE(review): AARCH64_OPDE_UNTIED_IMMS/UNTIED_OPERAND have no
     asserts here, and AARCH64_OPDE_OTHER_ERROR is compared against
     REG_LIST_STRIDE rather than UNALIGNED -- confirm this is intended.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5190
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   Returns a pointer to a static buffer, so the result is only valid
   until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5219
5220 static void
5221 reset_aarch64_instruction (aarch64_instruction *instruction)
5222 {
5223 memset (instruction, '\0', sizeof (aarch64_instruction));
5224 instruction->reloc.type = BFD_RELOC_UNUSED;
5225 }
5226
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  /* The opcode template this error was recorded against.  */
  const aarch64_opcode *opcode;
  /* The details of the error.  */
  aarch64_operand_error detail;
  /* Next record in the singly-linked list.  */
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of a singly-linked list of error records.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5258
5259 /* Initialize the data structure that stores the operand mismatch
5260 information on assembling one line of the assembly code. */
5261 static void
5262 init_operand_error_report (void)
5263 {
5264 if (operand_error_report.head != NULL)
5265 {
5266 gas_assert (operand_error_report.tail != NULL);
5267 operand_error_report.tail->next = free_opnd_error_record_nodes;
5268 free_opnd_error_record_nodes = operand_error_report.head;
5269 operand_error_report.head = NULL;
5270 operand_error_report.tail = NULL;
5271 return;
5272 }
5273 gas_assert (operand_error_report.tail == NULL);
5274 }
5275
5276 /* Return TRUE if some operand error has been recorded during the
5277 parsing of the current assembly line using the opcode *OPCODE;
5278 otherwise return FALSE. */
5279 static inline bool
5280 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5281 {
5282 operand_error_record *record = operand_error_report.head;
5283 return record && record->opcode == opcode;
5284 }
5285
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Store (or overwrite) the error details in the opcode's record.  */
  record->detail = new_record->detail;
}
5337
5338 static inline void
5339 record_operand_error_info (const aarch64_opcode *opcode,
5340 aarch64_operand_error *error_info)
5341 {
5342 operand_error_record record;
5343 record.opcode = opcode;
5344 record.detail = *error_info;
5345 add_operand_error_record (&record);
5346 }
5347
5348 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5349 error message *ERROR, for operand IDX (count from 0). */
5350
5351 static void
5352 record_operand_error (const aarch64_opcode *opcode, int idx,
5353 enum aarch64_operand_error_kind kind,
5354 const char* error)
5355 {
5356 aarch64_operand_error info;
5357 memset(&info, 0, sizeof (info));
5358 info.index = idx;
5359 info.kind = kind;
5360 info.error = error;
5361 info.non_fatal = false;
5362 record_operand_error_info (opcode, &info);
5363 }
5364
5365 static void
5366 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5367 enum aarch64_operand_error_kind kind,
5368 const char* error, const int *extra_data)
5369 {
5370 aarch64_operand_error info;
5371 info.index = idx;
5372 info.kind = kind;
5373 info.error = error;
5374 info.data[0].i = extra_data[0];
5375 info.data[1].i = extra_data[1];
5376 info.data[2].i = extra_data[2];
5377 info.non_fatal = false;
5378 record_operand_error_info (opcode, &info);
5379 }
5380
5381 static void
5382 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5383 const char* error, int lower_bound,
5384 int upper_bound)
5385 {
5386 int data[3] = {lower_bound, upper_bound, 0};
5387 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5388 error, data);
5389 }
5390
5391 /* Remove the operand error record for *OPCODE. */
5392 static void ATTRIBUTE_UNUSED
5393 remove_operand_error_record (const aarch64_opcode *opcode)
5394 {
5395 if (opcode_has_operand_error_p (opcode))
5396 {
5397 operand_error_record* record = operand_error_report.head;
5398 gas_assert (record != NULL && operand_error_report.tail != NULL);
5399 operand_error_report.head = record->next;
5400 record->next = free_opnd_error_record_nodes;
5401 free_opnd_error_record_nodes = record;
5402 if (operand_error_report.head == NULL)
5403 {
5404 gas_assert (operand_error_report.tail == record);
5405 operand_error_report.tail = NULL;
5406 }
5407 }
5408 }
5409
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
   The best match is the sequence agreeing with the most operand
   qualifiers in *INSTR.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL entry marks the end of the populated part.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this pattern.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first pattern with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5459
5460 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5461 corresponding operands in *INSTR. */
5462
5463 static inline void
5464 assign_qualifier_sequence (aarch64_inst *instr,
5465 const aarch64_opnd_qualifier_t *qualifiers)
5466 {
5467 int i = 0;
5468 int num_opnds = aarch64_num_of_operands (instr->opcode);
5469 gas_assert (num_opnds);
5470 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5471 instr->operands[i].qualifier = *qualifiers;
5472 }
5473
/* Callback used by aarch64_print_operand to apply STYLE to the
   disassembler output created from FMT and ARGS.  The STYLER object holds
   any required state.  Must return a pointer to a string (created from FMT
   and ARGS) that will continue to be valid until the complete disassembled
   instruction has been printed.

   We don't currently add any styling to the output of the disassembler as
   used within assembler error messages, and so STYLE is ignored here.  A
   new string is allocated on the obstack held within STYLER and returned
   to the caller.  */

static const char *aarch64_apply_style
  (struct aarch64_styler *styler,
   enum disassembler_style style ATTRIBUTE_UNUSED,
   const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  The sizing pass must use a copy of
     ARGS, since a va_list can only be traversed once.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5508
/* Print operands for the diagnosis purpose.  Appends the printed operands
   of OPCODE/OPNDS to BUF, which the caller must have NUL-terminated and
   sized large enough; no bounds checking is performed here.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled output fragments are allocated on (and freed with) CONTENT.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5560
/* Send to stderr a string as information.

   FORMAT and the following arguments are printf-style.  The message is
   prefixed with the current input file name and, when known, the line
   number; a newline is appended.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *filename;
  va_list ap;

  filename = as_where (&lineno);
  if (filename != NULL)
    {
      if (lineno == 0)
	fprintf (stderr, "%s: ", filename);
      else
	fprintf (stderr, "%s:%u: ", filename, lineno);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
5584
5585 /* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
5586 relates to registers or register lists. If so, return a string that
5587 reports the error against "operand %d", otherwise return null. */
5588
5589 static const char *
5590 get_reg_error_message (const aarch64_operand_error *detail)
5591 {
5592 /* Handle the case where we found a register that was expected
5593 to be in a register list outside of a register list. */
5594 if ((detail->data[1].i & detail->data[2].i) != 0
5595 && (detail->data[1].i & SEF_IN_REGLIST) == 0)
5596 return _("missing braces at operand %d");
5597
5598 /* If some opcodes expected a register, and we found a register,
5599 complain about the difference. */
5600 if (detail->data[2].i)
5601 {
5602 unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
5603 ? detail->data[1].i & ~SEF_IN_REGLIST
5604 : detail->data[0].i & ~SEF_DEFAULT_ERROR);
5605 const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
5606 if (!msg)
5607 msg = N_("unexpected register type at operand %d");
5608 return msg;
5609 }
5610
5611 /* Handle the case where we got to the point of trying to parse a
5612 register within a register list, but didn't find a known register. */
5613 if (detail->data[1].i & SEF_IN_REGLIST)
5614 {
5615 unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
5616 const char *msg = get_reg_expected_msg (expected, 0);
5617 if (!msg)
5618 msg = _("invalid register list at operand %d");
5619 return msg;
5620 }
5621
5622 /* Punt if register-related problems weren't the only errors. */
5623 if (detail->data[0].i & SEF_DEFAULT_ERROR)
5624 return NULL;
5625
5626 /* Handle the case where the only acceptable things are registers. */
5627 if (detail->data[1].i == 0)
5628 {
5629 const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
5630 if (!msg)
5631 msg = _("expected a register at operand %d");
5632 return msg;
5633 }
5634
5635 /* Handle the case where the only acceptable things are register lists,
5636 and there was no opening '{'. */
5637 if (detail->data[0].i == 0)
5638 return _("expected '{' at operand %d");
5639
5640 return _("expected a register or register list at operand %d");
5641 }
5642
/* Output one operand error record.

   RECORD describes a mismatch between the assembly line STR and one
   opcode template.  Report it through as_bad, or through as_warn when
   the back end flagged the error as non-fatal.  For invalid-variant
   errors in verbose mode, additionally re-parse and re-print the
   instruction with corrected qualifiers as a "did you mean" hint.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the erroneous operand, if a specific operand was
     identified (IDX >= 0).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal diagnostics become warnings, everything else an error.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Prefer a register-specific message when the syntax error can be
	 attributed to register or register-list parsing.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  /* Encoding is expected to fail here: this error record exists
	     precisely because no qualifier variant matched.  */
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
                 "as operand 1 -- `%s'"),
               detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
               detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_INVALID_VG_SIZE:
      if (detail->data[0].i == 0)
	handler (_("unexpected vector group size at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("operand %d must have a vector group size of %d -- `%s'"),
		 idx + 1, detail->data[0].i, str);
      break;

    case AARCH64_OPDE_REG_LIST_LENGTH:
      /* data[0].i is a bitmask of acceptable list lengths: bit N set
	 means a list of N registers is allowed.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("expected a single-register list at operand %d -- `%s'"),
		 idx + 1, str);
      else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
	handler (_("expected a list of %d registers at operand %d -- `%s'"),
		 get_log2 (detail->data[0].i), idx + 1, str);
      else if (detail->data[0].i == 0x14)
	handler (_("expected a list of %d or %d registers at"
		   " operand %d -- `%s'"),
		 2, 4, idx + 1, str);
      else
	handler (_("invalid number of registers in the list"
		   " at operand %d -- `%s'"), idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST_STRIDE:
      /* data[0].i is a bitmask of acceptable strides: bit N set means
	 a stride of N is allowed.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("the register list must have a stride of %d"
		   " at operand %d -- `%s'"), 1, idx + 1, str);
      else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
	handler (_("the register list must have a stride of %d or %d"
		   " at operand %d -- `%s`"), 1,
		 detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
      else
	handler (_("invalid register stride at operand %d -- `%s'"),
		 idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5885
5886 /* Return true if the presence of error A against an instruction means
5887 that error B should not be reported. This is only used as a first pass,
5888 to pick the kind of error that we should report. */
5889
5890 static bool
5891 better_error_p (operand_error_record *a, operand_error_record *b)
5892 {
5893 /* For errors reported during parsing, prefer errors that relate to
5894 later operands, since that implies that the earlier operands were
5895 syntactically valid.
5896
5897 For example, if we see a register R instead of an immediate in
5898 operand N, we'll report that as a recoverable "immediate operand
5899 required" error. This is because there is often another opcode
5900 entry that accepts a register operand N, and any errors about R
5901 should be reported against the register forms of the instruction.
5902 But if no such register form exists, the recoverable error should
5903 still win over a syntax error against operand N-1.
5904
5905 For these purposes, count an error reported at the end of the
5906 assembly string as equivalent to an error reported against the
5907 final operand. This means that opcode entries that expect more
5908 operands win over "unexpected characters following instruction". */
5909 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5910 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5911 {
5912 int a_index = (a->detail.index < 0
5913 ? aarch64_num_of_operands (a->opcode) - 1
5914 : a->detail.index);
5915 int b_index = (b->detail.index < 0
5916 ? aarch64_num_of_operands (b->opcode) - 1
5917 : b->detail.index);
5918 if (a_index != b_index)
5919 return a_index > b_index;
5920 }
5921 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5922 }
5923
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly line being diagnosed; NON_FATAL_ONLY restricts
   the report to warnings collected for the template that did match.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      /* Track the best record seen so far, honoring the non-fatal
	 filter.  */
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the register-class information from equally-ranked
		 syntax errors so the combined message covers all the
		 templates that were tried.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      /* Likewise merge the acceptable length/stride masks.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the cloest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6059 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Store the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
6070
/* Read a little-endian AARCH64 instruction back from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t value = 0;
  int i;

  /* Fold in the bytes from most-significant down to byte 0.  */
  for (i = 3; i >= 0; i--)
    value = (value << 8) | p[i];
  return value;
}
6080
/* Emit the encoded instruction held in inst.base.value into the current
   frag, creating a fix-up for any pending relocation.  NEW_INST, when
   non-NULL, is attached to the fix-up so later relocation processing can
   refer back to the instruction's IR.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one 4-byte instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Record that this frag now contains an instruction.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so that
	     md_apply_fix can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Let the DWARF line-number machinery know code was emitted.  */
  dwarf2_emit_insn (INSN_SIZE);
}
6114
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry sharing this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry in the chain for the same mnemonic.  */
  struct templates *next;
};

typedef struct templates templates;
6124
6125 static templates *
6126 lookup_mnemonic (const char *start, int len)
6127 {
6128 templates *templ = NULL;
6129
6130 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6131 return templ;
6132 }
6133
6134 /* Subroutine of md_assemble, responsible for looking up the primary
6135 opcode from the mnemonic the user wrote. BASE points to the beginning
6136 of the mnemonic, DOT points to the first '.' within the mnemonic
6137 (if any) and END points to the end of the mnemonic. */
6138
6139 static templates *
6140 opcode_lookup (char *base, char *dot, char *end)
6141 {
6142 const aarch64_cond *cond;
6143 char condname[16];
6144 int len;
6145
6146 if (dot == end)
6147 return 0;
6148
6149 inst.cond = COND_ALWAYS;
6150
6151 /* Handle a possible condition. */
6152 if (dot)
6153 {
6154 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6155 if (!cond)
6156 return 0;
6157 inst.cond = cond->value;
6158 len = dot - base;
6159 }
6160 else
6161 len = end - base;
6162
6163 if (inst.cond == COND_ALWAYS)
6164 {
6165 /* Look for unaffixed mnemonic. */
6166 return lookup_mnemonic (base, len);
6167 }
6168 else if (len <= 13)
6169 {
6170 /* append ".c" to mnemonic if conditional */
6171 memcpy (condname, base, len);
6172 memcpy (condname + len, ".c", 2);
6173 base = condname;
6174 len += 2;
6175 return lookup_mnemonic (base, len);
6176 }
6177
6178 return NULL;
6179 }
6180
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The operand is filled with the opcode's default value; which field of
   *OPERAND receives it depends on the operand type.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default names the register; the lane
       index is handled elsewhere.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is an immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scaled pattern acts like "pattern, mul #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option-table operands: the default indexes the options table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6279
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates that the relocation in inst.reloc is usable with the
   current MOV[ZNK] instruction and register width, and derives the
   implicit shift amount (0/16/32/48) from the relocation's group
   number, storing it in operand 1's shifter.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation types are not permitted on MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation's group number onto the shift amount.  */
  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16], shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32], shift 32 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48], shift 48 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6381
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The concrete relocation is chosen from a table indexed by the pseudo
   relocation type (row) and the log2 of the access size implied by the
   second operand's qualifier (column).  Returns BFD_RELOC_AARCH64_NONE
   on a fatal size mismatch.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows follow the pseudo reloc types in order; columns are indexed by
     log2 of the access size (8/16/32/64/128 bits).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand has no explicit qualifier, infer it from the
     destination operand's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS rows above have no 128-bit entry.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6469
6470 /* Check whether a register list REGINFO is valid. The registers have type
6471 REG_TYPE and must be numbered in increasing order (modulo the register
6472 bank size). They must have a consistent stride.
6473
6474 Return true if the list is valid, describing it in LIST if so. */
6475
6476 static bool
6477 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
6478 aarch64_reg_type reg_type)
6479 {
6480 uint32_t i, nb_regs, prev_regno, incr, mask;
6481 mask = reg_type_mask (reg_type);
6482
6483 nb_regs = 1 + (reginfo & 0x3);
6484 reginfo >>= 2;
6485 prev_regno = reginfo & 0x1f;
6486 incr = 1;
6487
6488 list->first_regno = prev_regno;
6489 list->num_regs = nb_regs;
6490
6491 for (i = 1; i < nb_regs; ++i)
6492 {
6493 uint32_t curr_regno, curr_incr;
6494 reginfo >>= 5;
6495 curr_regno = reginfo & 0x1f;
6496 curr_incr = (curr_regno - prev_regno) & mask;
6497 if (curr_incr == 0)
6498 return false;
6499 else if (i == 1)
6500 incr = curr_incr;
6501 else if (curr_incr != incr)
6502 return false;
6503 prev_regno = curr_regno;
6504 }
6505
6506 list->stride = incr;
6507 return true;
6508 }
6509
6510 /* Generic instruction operand parser. This does no encoding and no
6511 semantic validation; it merely squirrels values away in the inst
6512 structure. Returns TRUE or FALSE depending on whether the
6513 specified grammar matched. */
6514
6515 static bool
6516 parse_operands (char *str, const aarch64_opcode *opcode)
6517 {
6518 int i;
6519 char *backtrack_pos = 0;
6520 const enum aarch64_opnd *operands = opcode->operands;
6521 aarch64_reg_type imm_reg_type;
6522
6523 clear_error ();
6524 skip_whitespace (str);
6525
6526 if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SME2))
6527 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP_PN;
6528 else if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
6529 || AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2))
6530 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6531 else
6532 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6533
6534 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6535 {
6536 int64_t val;
6537 const reg_entry *reg;
6538 int comma_skipped_p = 0;
6539 struct vector_type_el vectype;
6540 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6541 aarch64_opnd_info *info = &inst.base.operands[i];
6542 aarch64_reg_type reg_type;
6543
6544 DEBUG_TRACE ("parse operand %d", i);
6545
6546 /* Assign the operand code. */
6547 info->type = operands[i];
6548
6549 if (optional_operand_p (opcode, i))
6550 {
6551 /* Remember where we are in case we need to backtrack. */
6552 gas_assert (!backtrack_pos);
6553 backtrack_pos = str;
6554 }
6555
6556 /* Expect comma between operands; the backtrack mechanism will take
6557 care of cases of omitted optional operand. */
6558 if (i > 0 && ! skip_past_char (&str, ','))
6559 {
6560 set_syntax_error (_("comma expected between operands"));
6561 goto failure;
6562 }
6563 else
6564 comma_skipped_p = 1;
6565
6566 switch (operands[i])
6567 {
6568 case AARCH64_OPND_Rd:
6569 case AARCH64_OPND_Rn:
6570 case AARCH64_OPND_Rm:
6571 case AARCH64_OPND_Rt:
6572 case AARCH64_OPND_Rt2:
6573 case AARCH64_OPND_Rs:
6574 case AARCH64_OPND_Ra:
6575 case AARCH64_OPND_Rt_LS64:
6576 case AARCH64_OPND_Rt_SYS:
6577 case AARCH64_OPND_PAIRREG:
6578 case AARCH64_OPND_SVE_Rm:
6579 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6580
6581 /* In LS64 load/store instructions Rt register number must be even
6582 and <=22. */
6583 if (operands[i] == AARCH64_OPND_Rt_LS64)
6584 {
6585 /* We've already checked if this is valid register.
6586 This will check if register number (Rt) is not undefined for LS64
6587 instructions:
6588 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6589 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6590 {
6591 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6592 goto failure;
6593 }
6594 }
6595 break;
6596
6597 case AARCH64_OPND_Rd_SP:
6598 case AARCH64_OPND_Rn_SP:
6599 case AARCH64_OPND_Rt_SP:
6600 case AARCH64_OPND_SVE_Rn_SP:
6601 case AARCH64_OPND_Rm_SP:
6602 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6603 break;
6604
6605 case AARCH64_OPND_Rm_EXT:
6606 case AARCH64_OPND_Rm_SFT:
6607 po_misc_or_fail (parse_shifter_operand
6608 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6609 ? SHIFTED_ARITH_IMM
6610 : SHIFTED_LOGIC_IMM)));
6611 if (!info->shifter.operator_present)
6612 {
6613 /* Default to LSL if not present. Libopcodes prefers shifter
6614 kind to be explicit. */
6615 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6616 info->shifter.kind = AARCH64_MOD_LSL;
6617 /* For Rm_EXT, libopcodes will carry out further check on whether
6618 or not stack pointer is used in the instruction (Recall that
6619 "the extend operator is not optional unless at least one of
6620 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6621 }
6622 break;
6623
6624 case AARCH64_OPND_Fd:
6625 case AARCH64_OPND_Fn:
6626 case AARCH64_OPND_Fm:
6627 case AARCH64_OPND_Fa:
6628 case AARCH64_OPND_Ft:
6629 case AARCH64_OPND_Ft2:
6630 case AARCH64_OPND_Sd:
6631 case AARCH64_OPND_Sn:
6632 case AARCH64_OPND_Sm:
6633 case AARCH64_OPND_SVE_VZn:
6634 case AARCH64_OPND_SVE_Vd:
6635 case AARCH64_OPND_SVE_Vm:
6636 case AARCH64_OPND_SVE_Vn:
6637 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6638 break;
6639
6640 case AARCH64_OPND_SVE_Pd:
6641 case AARCH64_OPND_SVE_Pg3:
6642 case AARCH64_OPND_SVE_Pg4_5:
6643 case AARCH64_OPND_SVE_Pg4_10:
6644 case AARCH64_OPND_SVE_Pg4_16:
6645 case AARCH64_OPND_SVE_Pm:
6646 case AARCH64_OPND_SVE_Pn:
6647 case AARCH64_OPND_SVE_Pt:
6648 case AARCH64_OPND_SME_Pm:
6649 reg_type = REG_TYPE_P;
6650 goto vector_reg;
6651
6652 case AARCH64_OPND_SVE_Za_5:
6653 case AARCH64_OPND_SVE_Za_16:
6654 case AARCH64_OPND_SVE_Zd:
6655 case AARCH64_OPND_SVE_Zm_5:
6656 case AARCH64_OPND_SVE_Zm_16:
6657 case AARCH64_OPND_SVE_Zn:
6658 case AARCH64_OPND_SVE_Zt:
6659 case AARCH64_OPND_SME_Zm:
6660 reg_type = REG_TYPE_Z;
6661 goto vector_reg;
6662
6663 case AARCH64_OPND_SVE_PNd:
6664 case AARCH64_OPND_SVE_PNg4_10:
6665 case AARCH64_OPND_SVE_PNn:
6666 case AARCH64_OPND_SVE_PNt:
6667 case AARCH64_OPND_SME_PNd3:
6668 case AARCH64_OPND_SME_PNg3:
6669 case AARCH64_OPND_SME_PNn:
6670 reg_type = REG_TYPE_PN;
6671 goto vector_reg;
6672
6673 case AARCH64_OPND_Va:
6674 case AARCH64_OPND_Vd:
6675 case AARCH64_OPND_Vn:
6676 case AARCH64_OPND_Vm:
6677 reg_type = REG_TYPE_V;
6678 vector_reg:
6679 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6680 if (!reg)
6681 goto failure;
6682 if (vectype.defined & NTA_HASINDEX)
6683 goto failure;
6684
6685 info->reg.regno = reg->number;
6686 if ((reg_type == REG_TYPE_P
6687 || reg_type == REG_TYPE_PN
6688 || reg_type == REG_TYPE_Z)
6689 && vectype.type == NT_invtype)
6690 /* Unqualified P and Z registers are allowed in certain
6691 contexts. Rely on F_STRICT qualifier checking to catch
6692 invalid uses. */
6693 info->qualifier = AARCH64_OPND_QLF_NIL;
6694 else
6695 {
6696 info->qualifier = vectype_to_qualifier (&vectype);
6697 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6698 goto failure;
6699 }
6700 break;
6701
6702 case AARCH64_OPND_VdD1:
6703 case AARCH64_OPND_VnD1:
6704 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6705 if (!reg)
6706 goto failure;
6707 if (vectype.type != NT_d || vectype.index != 1)
6708 {
6709 set_fatal_syntax_error
6710 (_("the top half of a 128-bit FP/SIMD register is expected"));
6711 goto failure;
6712 }
6713 info->reg.regno = reg->number;
6714 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6715 here; it is correct for the purpose of encoding/decoding since
6716 only the register number is explicitly encoded in the related
6717 instructions, although this appears a bit hacky. */
6718 info->qualifier = AARCH64_OPND_QLF_S_D;
6719 break;
6720
6721 case AARCH64_OPND_SVE_Zm3_INDEX:
6722 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6723 case AARCH64_OPND_SVE_Zm3_19_INDEX:
6724 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6725 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6726 case AARCH64_OPND_SVE_Zm4_INDEX:
6727 case AARCH64_OPND_SVE_Zn_INDEX:
6728 case AARCH64_OPND_SME_Zm_INDEX1:
6729 case AARCH64_OPND_SME_Zm_INDEX2:
6730 case AARCH64_OPND_SME_Zm_INDEX3_1:
6731 case AARCH64_OPND_SME_Zm_INDEX3_2:
6732 case AARCH64_OPND_SME_Zm_INDEX3_10:
6733 case AARCH64_OPND_SME_Zm_INDEX4_1:
6734 case AARCH64_OPND_SME_Zm_INDEX4_10:
6735 case AARCH64_OPND_SME_Zn_INDEX1_16:
6736 case AARCH64_OPND_SME_Zn_INDEX2_15:
6737 case AARCH64_OPND_SME_Zn_INDEX2_16:
6738 case AARCH64_OPND_SME_Zn_INDEX3_14:
6739 case AARCH64_OPND_SME_Zn_INDEX3_15:
6740 case AARCH64_OPND_SME_Zn_INDEX4_14:
6741 reg_type = REG_TYPE_Z;
6742 goto vector_reg_index;
6743
6744 case AARCH64_OPND_Ed:
6745 case AARCH64_OPND_En:
6746 case AARCH64_OPND_Em:
6747 case AARCH64_OPND_Em16:
6748 case AARCH64_OPND_SM3_IMM2:
6749 reg_type = REG_TYPE_V;
6750 vector_reg_index:
6751 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6752 if (!reg)
6753 goto failure;
6754 if (!(vectype.defined & NTA_HASINDEX))
6755 goto failure;
6756
6757 if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
6758 /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
6759 info->qualifier = AARCH64_OPND_QLF_NIL;
6760 else
6761 {
6762 if (vectype.type == NT_invtype)
6763 goto failure;
6764 info->qualifier = vectype_to_qualifier (&vectype);
6765 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6766 goto failure;
6767 }
6768
6769 info->reglane.regno = reg->number;
6770 info->reglane.index = vectype.index;
6771 break;
6772
6773 case AARCH64_OPND_SVE_ZnxN:
6774 case AARCH64_OPND_SVE_ZtxN:
6775 case AARCH64_OPND_SME_Zdnx2:
6776 case AARCH64_OPND_SME_Zdnx4:
6777 case AARCH64_OPND_SME_Zmx2:
6778 case AARCH64_OPND_SME_Zmx4:
6779 case AARCH64_OPND_SME_Znx2:
6780 case AARCH64_OPND_SME_Znx4:
6781 case AARCH64_OPND_SME_Ztx2_STRIDED:
6782 case AARCH64_OPND_SME_Ztx4_STRIDED:
6783 reg_type = REG_TYPE_Z;
6784 goto vector_reg_list;
6785
6786 case AARCH64_OPND_SME_Pdx2:
6787 case AARCH64_OPND_SME_PdxN:
6788 reg_type = REG_TYPE_P;
6789 goto vector_reg_list;
6790
6791 case AARCH64_OPND_LVn:
6792 case AARCH64_OPND_LVt:
6793 case AARCH64_OPND_LVt_AL:
6794 case AARCH64_OPND_LEt:
6795 reg_type = REG_TYPE_V;
6796 vector_reg_list:
6797 if (reg_type == REG_TYPE_Z
6798 && get_opcode_dependent_value (opcode) == 1
6799 && *str != '{')
6800 {
6801 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6802 if (!reg)
6803 goto failure;
6804 info->reglist.first_regno = reg->number;
6805 info->reglist.num_regs = 1;
6806 info->reglist.stride = 1;
6807 }
6808 else
6809 {
6810 val = parse_vector_reg_list (&str, reg_type, &vectype);
6811 if (val == PARSE_FAIL)
6812 goto failure;
6813
6814 if (! reg_list_valid_p (val, &info->reglist, reg_type))
6815 {
6816 set_fatal_syntax_error (_("invalid register list"));
6817 goto failure;
6818 }
6819
6820 if ((int) vectype.width > 0 && *str != ',')
6821 {
6822 set_fatal_syntax_error
6823 (_("expected element type rather than vector type"));
6824 goto failure;
6825 }
6826 }
6827 if (operands[i] == AARCH64_OPND_LEt)
6828 {
6829 if (!(vectype.defined & NTA_HASINDEX))
6830 goto failure;
6831 info->reglist.has_index = 1;
6832 info->reglist.index = vectype.index;
6833 }
6834 else
6835 {
6836 if (vectype.defined & NTA_HASINDEX)
6837 goto failure;
6838 if (!(vectype.defined & NTA_HASTYPE))
6839 {
6840 if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
6841 set_fatal_syntax_error (_("missing type suffix"));
6842 goto failure;
6843 }
6844 }
6845 info->qualifier = vectype_to_qualifier (&vectype);
6846 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6847 goto failure;
6848 break;
6849
6850 case AARCH64_OPND_CRn:
6851 case AARCH64_OPND_CRm:
6852 {
6853 char prefix = *(str++);
6854 if (prefix != 'c' && prefix != 'C')
6855 goto failure;
6856
6857 po_imm_nc_or_fail ();
6858 if (val > 15)
6859 {
6860 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6861 goto failure;
6862 }
6863 info->qualifier = AARCH64_OPND_QLF_CR;
6864 info->imm.value = val;
6865 break;
6866 }
6867
6868 case AARCH64_OPND_SHLL_IMM:
6869 case AARCH64_OPND_IMM_VLSR:
6870 po_imm_or_fail (1, 64);
6871 info->imm.value = val;
6872 break;
6873
6874 case AARCH64_OPND_CCMP_IMM:
6875 case AARCH64_OPND_SIMM5:
6876 case AARCH64_OPND_FBITS:
6877 case AARCH64_OPND_TME_UIMM16:
6878 case AARCH64_OPND_UIMM4:
6879 case AARCH64_OPND_UIMM4_ADDG:
6880 case AARCH64_OPND_UIMM10:
6881 case AARCH64_OPND_UIMM3_OP1:
6882 case AARCH64_OPND_UIMM3_OP2:
6883 case AARCH64_OPND_IMM_VLSL:
6884 case AARCH64_OPND_IMM:
6885 case AARCH64_OPND_IMM_2:
6886 case AARCH64_OPND_WIDTH:
6887 case AARCH64_OPND_SVE_INV_LIMM:
6888 case AARCH64_OPND_SVE_LIMM:
6889 case AARCH64_OPND_SVE_LIMM_MOV:
6890 case AARCH64_OPND_SVE_SHLIMM_PRED:
6891 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6892 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6893 case AARCH64_OPND_SME_SHRIMM4:
6894 case AARCH64_OPND_SME_SHRIMM5:
6895 case AARCH64_OPND_SVE_SHRIMM_PRED:
6896 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6897 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6898 case AARCH64_OPND_SVE_SIMM5:
6899 case AARCH64_OPND_SVE_SIMM5B:
6900 case AARCH64_OPND_SVE_SIMM6:
6901 case AARCH64_OPND_SVE_SIMM8:
6902 case AARCH64_OPND_SVE_UIMM3:
6903 case AARCH64_OPND_SVE_UIMM7:
6904 case AARCH64_OPND_SVE_UIMM8:
6905 case AARCH64_OPND_SVE_UIMM8_53:
6906 case AARCH64_OPND_IMM_ROT1:
6907 case AARCH64_OPND_IMM_ROT2:
6908 case AARCH64_OPND_IMM_ROT3:
6909 case AARCH64_OPND_SVE_IMM_ROT1:
6910 case AARCH64_OPND_SVE_IMM_ROT2:
6911 case AARCH64_OPND_SVE_IMM_ROT3:
6912 case AARCH64_OPND_CSSC_SIMM8:
6913 case AARCH64_OPND_CSSC_UIMM8:
6914 po_imm_nc_or_fail ();
6915 info->imm.value = val;
6916 break;
6917
6918 case AARCH64_OPND_SVE_AIMM:
6919 case AARCH64_OPND_SVE_ASIMM:
6920 po_imm_nc_or_fail ();
6921 info->imm.value = val;
6922 skip_whitespace (str);
6923 if (skip_past_comma (&str))
6924 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6925 else
6926 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6927 break;
6928
6929 case AARCH64_OPND_SVE_PATTERN:
6930 po_enum_or_fail (aarch64_sve_pattern_array);
6931 info->imm.value = val;
6932 break;
6933
6934 case AARCH64_OPND_SVE_PATTERN_SCALED:
6935 po_enum_or_fail (aarch64_sve_pattern_array);
6936 info->imm.value = val;
6937 if (skip_past_comma (&str)
6938 && !parse_shift (&str, info, SHIFTED_MUL))
6939 goto failure;
6940 if (!info->shifter.operator_present)
6941 {
6942 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6943 info->shifter.kind = AARCH64_MOD_MUL;
6944 info->shifter.amount = 1;
6945 }
6946 break;
6947
6948 case AARCH64_OPND_SVE_PRFOP:
6949 po_enum_or_fail (aarch64_sve_prfop_array);
6950 info->imm.value = val;
6951 break;
6952
6953 case AARCH64_OPND_UIMM7:
6954 po_imm_or_fail (0, 127);
6955 info->imm.value = val;
6956 break;
6957
6958 case AARCH64_OPND_IDX:
6959 case AARCH64_OPND_MASK:
6960 case AARCH64_OPND_BIT_NUM:
6961 case AARCH64_OPND_IMMR:
6962 case AARCH64_OPND_IMMS:
6963 po_imm_or_fail (0, 63);
6964 info->imm.value = val;
6965 break;
6966
6967 case AARCH64_OPND_IMM0:
6968 po_imm_nc_or_fail ();
6969 if (val != 0)
6970 {
6971 set_fatal_syntax_error (_("immediate zero expected"));
6972 goto failure;
6973 }
6974 info->imm.value = 0;
6975 break;
6976
6977 case AARCH64_OPND_FPIMM0:
6978 {
6979 int qfloat;
6980 bool res1 = false, res2 = false;
6981 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6982 it is probably not worth the effort to support it. */
6983 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6984 imm_reg_type))
6985 && (error_p ()
6986 || !(res2 = parse_constant_immediate (&str, &val,
6987 imm_reg_type))))
6988 goto failure;
6989 if ((res1 && qfloat == 0) || (res2 && val == 0))
6990 {
6991 info->imm.value = 0;
6992 info->imm.is_fp = 1;
6993 break;
6994 }
6995 set_fatal_syntax_error (_("immediate zero expected"));
6996 goto failure;
6997 }
6998
6999 case AARCH64_OPND_IMM_MOV:
7000 {
7001 char *saved = str;
7002 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
7003 || reg_name_p (str, REG_TYPE_V))
7004 goto failure;
7005 str = saved;
7006 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
7007 GE_OPT_PREFIX, REJECT_ABSENT));
7008 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
7009 later. fix_mov_imm_insn will try to determine a machine
7010 instruction (MOVZ, MOVN or ORR) for it and will issue an error
7011 message if the immediate cannot be moved by a single
7012 instruction. */
7013 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7014 inst.base.operands[i].skip = 1;
7015 }
7016 break;
7017
7018 case AARCH64_OPND_SIMD_IMM:
7019 case AARCH64_OPND_SIMD_IMM_SFT:
7020 if (! parse_big_immediate (&str, &val, imm_reg_type))
7021 goto failure;
7022 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7023 /* addr_off_p */ 0,
7024 /* need_libopcodes_p */ 1,
7025 /* skip_p */ 1);
7026 /* Parse shift.
7027 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
7028 shift, we don't check it here; we leave the checking to
7029 the libopcodes (operand_general_constraint_met_p). By
7030 doing this, we achieve better diagnostics. */
7031 if (skip_past_comma (&str)
7032 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
7033 goto failure;
7034 if (!info->shifter.operator_present
7035 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
7036 {
7037 /* Default to LSL if not present. Libopcodes prefers shifter
7038 kind to be explicit. */
7039 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7040 info->shifter.kind = AARCH64_MOD_LSL;
7041 }
7042 break;
7043
7044 case AARCH64_OPND_FPIMM:
7045 case AARCH64_OPND_SIMD_FPIMM:
7046 case AARCH64_OPND_SVE_FPIMM8:
7047 {
7048 int qfloat;
7049 bool dp_p;
7050
7051 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7052 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
7053 || !aarch64_imm_float_p (qfloat))
7054 {
7055 if (!error_p ())
7056 set_fatal_syntax_error (_("invalid floating-point"
7057 " constant"));
7058 goto failure;
7059 }
7060 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
7061 inst.base.operands[i].imm.is_fp = 1;
7062 }
7063 break;
7064
7065 case AARCH64_OPND_SVE_I1_HALF_ONE:
7066 case AARCH64_OPND_SVE_I1_HALF_TWO:
7067 case AARCH64_OPND_SVE_I1_ZERO_ONE:
7068 {
7069 int qfloat;
7070 bool dp_p;
7071
7072 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7073 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
7074 {
7075 if (!error_p ())
7076 set_fatal_syntax_error (_("invalid floating-point"
7077 " constant"));
7078 goto failure;
7079 }
7080 inst.base.operands[i].imm.value = qfloat;
7081 inst.base.operands[i].imm.is_fp = 1;
7082 }
7083 break;
7084
7085 case AARCH64_OPND_LIMM:
7086 po_misc_or_fail (parse_shifter_operand (&str, info,
7087 SHIFTED_LOGIC_IMM));
7088 if (info->shifter.operator_present)
7089 {
7090 set_fatal_syntax_error
7091 (_("shift not allowed for bitmask immediate"));
7092 goto failure;
7093 }
7094 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7095 /* addr_off_p */ 0,
7096 /* need_libopcodes_p */ 1,
7097 /* skip_p */ 1);
7098 break;
7099
7100 case AARCH64_OPND_AIMM:
7101 if (opcode->op == OP_ADD)
7102 /* ADD may have relocation types. */
7103 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7104 SHIFTED_ARITH_IMM));
7105 else
7106 po_misc_or_fail (parse_shifter_operand (&str, info,
7107 SHIFTED_ARITH_IMM));
7108 switch (inst.reloc.type)
7109 {
7110 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7111 info->shifter.amount = 12;
7112 break;
7113 case BFD_RELOC_UNUSED:
7114 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7115 if (info->shifter.kind != AARCH64_MOD_NONE)
7116 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7117 inst.reloc.pc_rel = 0;
7118 break;
7119 default:
7120 break;
7121 }
7122 info->imm.value = 0;
7123 if (!info->shifter.operator_present)
7124 {
7125 /* Default to LSL if not present. Libopcodes prefers shifter
7126 kind to be explicit. */
7127 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7128 info->shifter.kind = AARCH64_MOD_LSL;
7129 }
7130 break;
7131
7132 case AARCH64_OPND_HALF:
7133 {
7134 /* #<imm16> or relocation. */
7135 int internal_fixup_p;
7136 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7137 if (internal_fixup_p)
7138 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7139 skip_whitespace (str);
7140 if (skip_past_comma (&str))
7141 {
7142 /* {, LSL #<shift>} */
7143 if (! aarch64_gas_internal_fixup_p ())
7144 {
7145 set_fatal_syntax_error (_("can't mix relocation modifier "
7146 "with explicit shift"));
7147 goto failure;
7148 }
7149 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7150 }
7151 else
7152 inst.base.operands[i].shifter.amount = 0;
7153 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7154 inst.base.operands[i].imm.value = 0;
7155 if (! process_movw_reloc_info ())
7156 goto failure;
7157 }
7158 break;
7159
7160 case AARCH64_OPND_EXCEPTION:
7161 case AARCH64_OPND_UNDEFINED:
7162 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7163 imm_reg_type));
7164 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7165 /* addr_off_p */ 0,
7166 /* need_libopcodes_p */ 0,
7167 /* skip_p */ 1);
7168 break;
7169
7170 case AARCH64_OPND_NZCV:
7171 {
7172 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7173 if (nzcv != NULL)
7174 {
7175 str += 4;
7176 info->imm.value = nzcv->value;
7177 break;
7178 }
7179 po_imm_or_fail (0, 15);
7180 info->imm.value = val;
7181 }
7182 break;
7183
7184 case AARCH64_OPND_COND:
7185 case AARCH64_OPND_COND1:
7186 {
7187 char *start = str;
7188 do
7189 str++;
7190 while (ISALPHA (*str));
7191 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7192 if (info->cond == NULL)
7193 {
7194 set_syntax_error (_("invalid condition"));
7195 goto failure;
7196 }
7197 else if (operands[i] == AARCH64_OPND_COND1
7198 && (info->cond->value & 0xe) == 0xe)
7199 {
7200 /* Do not allow AL or NV. */
7201 set_default_error ();
7202 goto failure;
7203 }
7204 }
7205 break;
7206
7207 case AARCH64_OPND_ADDR_ADRP:
7208 po_misc_or_fail (parse_adrp (&str));
7209 /* Clear the value as operand needs to be relocated. */
7210 info->imm.value = 0;
7211 break;
7212
7213 case AARCH64_OPND_ADDR_PCREL14:
7214 case AARCH64_OPND_ADDR_PCREL19:
7215 case AARCH64_OPND_ADDR_PCREL21:
7216 case AARCH64_OPND_ADDR_PCREL26:
7217 po_misc_or_fail (parse_address (&str, info));
7218 if (!info->addr.pcrel)
7219 {
7220 set_syntax_error (_("invalid pc-relative address"));
7221 goto failure;
7222 }
7223 if (inst.gen_lit_pool
7224 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7225 {
7226 /* Only permit "=value" in the literal load instructions.
7227 The literal will be generated by programmer_friendly_fixup. */
7228 set_syntax_error (_("invalid use of \"=immediate\""));
7229 goto failure;
7230 }
7231 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7232 {
7233 set_syntax_error (_("unrecognized relocation suffix"));
7234 goto failure;
7235 }
7236 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7237 {
7238 info->imm.value = inst.reloc.exp.X_add_number;
7239 inst.reloc.type = BFD_RELOC_UNUSED;
7240 }
7241 else
7242 {
7243 info->imm.value = 0;
7244 if (inst.reloc.type == BFD_RELOC_UNUSED)
7245 switch (opcode->iclass)
7246 {
7247 case compbranch:
7248 case condbranch:
7249 /* e.g. CBZ or B.COND */
7250 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7251 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7252 break;
7253 case testbranch:
7254 /* e.g. TBZ */
7255 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7256 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7257 break;
7258 case branch_imm:
7259 /* e.g. B or BL */
7260 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7261 inst.reloc.type =
7262 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7263 : BFD_RELOC_AARCH64_JUMP26;
7264 break;
7265 case loadlit:
7266 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7267 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7268 break;
7269 case pcreladdr:
7270 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7271 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7272 break;
7273 default:
7274 gas_assert (0);
7275 abort ();
7276 }
7277 inst.reloc.pc_rel = 1;
7278 }
7279 break;
7280
7281 case AARCH64_OPND_ADDR_SIMPLE:
7282 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7283 {
7284 /* [<Xn|SP>{, #<simm>}] */
7285 char *start = str;
7286 /* First use the normal address-parsing routines, to get
7287 the usual syntax errors. */
7288 po_misc_or_fail (parse_address (&str, info));
7289 if (info->addr.pcrel || info->addr.offset.is_reg
7290 || !info->addr.preind || info->addr.postind
7291 || info->addr.writeback)
7292 {
7293 set_syntax_error (_("invalid addressing mode"));
7294 goto failure;
7295 }
7296
7297 /* Then retry, matching the specific syntax of these addresses. */
7298 str = start;
7299 po_char_or_fail ('[');
7300 po_reg_or_fail (REG_TYPE_R64_SP);
7301 /* Accept optional ", #0". */
7302 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7303 && skip_past_char (&str, ','))
7304 {
7305 skip_past_char (&str, '#');
7306 if (! skip_past_char (&str, '0'))
7307 {
7308 set_fatal_syntax_error
7309 (_("the optional immediate offset can only be 0"));
7310 goto failure;
7311 }
7312 }
7313 po_char_or_fail (']');
7314 break;
7315 }
7316
7317 case AARCH64_OPND_ADDR_REGOFF:
7318 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7319 po_misc_or_fail (parse_address (&str, info));
7320 regoff_addr:
7321 if (info->addr.pcrel || !info->addr.offset.is_reg
7322 || !info->addr.preind || info->addr.postind
7323 || info->addr.writeback)
7324 {
7325 set_syntax_error (_("invalid addressing mode"));
7326 goto failure;
7327 }
7328 if (!info->shifter.operator_present)
7329 {
7330 /* Default to LSL if not present. Libopcodes prefers shifter
7331 kind to be explicit. */
7332 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7333 info->shifter.kind = AARCH64_MOD_LSL;
7334 }
7335 /* Qualifier to be deduced by libopcodes. */
7336 break;
7337
7338 case AARCH64_OPND_ADDR_SIMM7:
7339 po_misc_or_fail (parse_address (&str, info));
7340 if (info->addr.pcrel || info->addr.offset.is_reg
7341 || (!info->addr.preind && !info->addr.postind))
7342 {
7343 set_syntax_error (_("invalid addressing mode"));
7344 goto failure;
7345 }
7346 if (inst.reloc.type != BFD_RELOC_UNUSED)
7347 {
7348 set_syntax_error (_("relocation not allowed"));
7349 goto failure;
7350 }
7351 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7352 /* addr_off_p */ 1,
7353 /* need_libopcodes_p */ 1,
7354 /* skip_p */ 0);
7355 break;
7356
7357 case AARCH64_OPND_ADDR_SIMM9:
7358 case AARCH64_OPND_ADDR_SIMM9_2:
7359 case AARCH64_OPND_ADDR_SIMM11:
7360 case AARCH64_OPND_ADDR_SIMM13:
7361 po_misc_or_fail (parse_address (&str, info));
7362 if (info->addr.pcrel || info->addr.offset.is_reg
7363 || (!info->addr.preind && !info->addr.postind)
7364 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7365 && info->addr.writeback))
7366 {
7367 set_syntax_error (_("invalid addressing mode"));
7368 goto failure;
7369 }
7370 if (inst.reloc.type != BFD_RELOC_UNUSED)
7371 {
7372 set_syntax_error (_("relocation not allowed"));
7373 goto failure;
7374 }
7375 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7376 /* addr_off_p */ 1,
7377 /* need_libopcodes_p */ 1,
7378 /* skip_p */ 0);
7379 break;
7380
7381 case AARCH64_OPND_ADDR_SIMM10:
7382 case AARCH64_OPND_ADDR_OFFSET:
7383 po_misc_or_fail (parse_address (&str, info));
7384 if (info->addr.pcrel || info->addr.offset.is_reg
7385 || !info->addr.preind || info->addr.postind)
7386 {
7387 set_syntax_error (_("invalid addressing mode"));
7388 goto failure;
7389 }
7390 if (inst.reloc.type != BFD_RELOC_UNUSED)
7391 {
7392 set_syntax_error (_("relocation not allowed"));
7393 goto failure;
7394 }
7395 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7396 /* addr_off_p */ 1,
7397 /* need_libopcodes_p */ 1,
7398 /* skip_p */ 0);
7399 break;
7400
7401 case AARCH64_OPND_ADDR_UIMM12:
7402 po_misc_or_fail (parse_address (&str, info));
7403 if (info->addr.pcrel || info->addr.offset.is_reg
7404 || !info->addr.preind || info->addr.writeback)
7405 {
7406 set_syntax_error (_("invalid addressing mode"));
7407 goto failure;
7408 }
7409 if (inst.reloc.type == BFD_RELOC_UNUSED)
7410 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7411 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7412 || (inst.reloc.type
7413 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7414 || (inst.reloc.type
7415 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7416 || (inst.reloc.type
7417 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7418 || (inst.reloc.type
7419 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7420 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7421 /* Leave qualifier to be determined by libopcodes. */
7422 break;
7423
7424 case AARCH64_OPND_SIMD_ADDR_POST:
7425 /* [<Xn|SP>], <Xm|#<amount>> */
7426 po_misc_or_fail (parse_address (&str, info));
7427 if (!info->addr.postind || !info->addr.writeback)
7428 {
7429 set_syntax_error (_("invalid addressing mode"));
7430 goto failure;
7431 }
7432 if (!info->addr.offset.is_reg)
7433 {
7434 if (inst.reloc.exp.X_op == O_constant)
7435 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7436 else
7437 {
7438 set_fatal_syntax_error
7439 (_("writeback value must be an immediate constant"));
7440 goto failure;
7441 }
7442 }
7443 /* No qualifier. */
7444 break;
7445
7446 case AARCH64_OPND_SME_SM_ZA:
7447 /* { SM | ZA } */
7448 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7449 {
7450 set_syntax_error (_("unknown or missing PSTATE field name"));
7451 goto failure;
7452 }
7453 info->reg.regno = val;
7454 break;
7455
7456 case AARCH64_OPND_SME_PnT_Wm_imm:
7457 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7458 &info->indexed_za, &qualifier, 0))
7459 goto failure;
7460 info->qualifier = qualifier;
7461 break;
7462
7463 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7464 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7465 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7466 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7467 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7468 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7469 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7470 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7471 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7472 case AARCH64_OPND_SVE_ADDR_RI_U6:
7473 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7474 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7475 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7476 /* [X<n>{, #imm, MUL VL}]
7477 [X<n>{, #imm}]
7478 but recognizing SVE registers. */
7479 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7480 &offset_qualifier));
7481 if (base_qualifier != AARCH64_OPND_QLF_X)
7482 {
7483 set_syntax_error (_("invalid addressing mode"));
7484 goto failure;
7485 }
7486 sve_regimm:
7487 if (info->addr.pcrel || info->addr.offset.is_reg
7488 || !info->addr.preind || info->addr.writeback)
7489 {
7490 set_syntax_error (_("invalid addressing mode"));
7491 goto failure;
7492 }
7493 if (inst.reloc.type != BFD_RELOC_UNUSED
7494 || inst.reloc.exp.X_op != O_constant)
7495 {
7496 /* Make sure this has priority over
7497 "invalid addressing mode". */
7498 set_fatal_syntax_error (_("constant offset required"));
7499 goto failure;
7500 }
7501 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7502 break;
7503
7504 case AARCH64_OPND_SVE_ADDR_R:
7505 /* [<Xn|SP>{, <R><m>}]
7506 but recognizing SVE registers. */
7507 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7508 &offset_qualifier));
7509 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7510 {
7511 offset_qualifier = AARCH64_OPND_QLF_X;
7512 info->addr.offset.is_reg = 1;
7513 info->addr.offset.regno = 31;
7514 }
7515 else if (base_qualifier != AARCH64_OPND_QLF_X
7516 || offset_qualifier != AARCH64_OPND_QLF_X)
7517 {
7518 set_syntax_error (_("invalid addressing mode"));
7519 goto failure;
7520 }
7521 goto regoff_addr;
7522
7523 case AARCH64_OPND_SVE_ADDR_RR:
7524 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7525 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7526 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7527 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7528 case AARCH64_OPND_SVE_ADDR_RX:
7529 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7530 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7531 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7532 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7533 but recognizing SVE registers. */
7534 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7535 &offset_qualifier));
7536 if (base_qualifier != AARCH64_OPND_QLF_X
7537 || offset_qualifier != AARCH64_OPND_QLF_X)
7538 {
7539 set_syntax_error (_("invalid addressing mode"));
7540 goto failure;
7541 }
7542 goto regoff_addr;
7543
7544 case AARCH64_OPND_SVE_ADDR_RZ:
7545 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7546 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7547 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7548 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7549 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7550 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7551 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7552 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7553 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7554 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7555 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7556 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7557 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7558 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7559 &offset_qualifier));
7560 if (base_qualifier != AARCH64_OPND_QLF_X
7561 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7562 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7563 {
7564 set_syntax_error (_("invalid addressing mode"));
7565 goto failure;
7566 }
7567 info->qualifier = offset_qualifier;
7568 goto regoff_addr;
7569
7570 case AARCH64_OPND_SVE_ADDR_ZX:
7571 /* [Zn.<T>{, <Xm>}]. */
7572 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7573 &offset_qualifier));
7574 /* Things to check:
7575 base_qualifier either S_S or S_D
7576 offset_qualifier must be X
7577 */
7578 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7579 && base_qualifier != AARCH64_OPND_QLF_S_D)
7580 || offset_qualifier != AARCH64_OPND_QLF_X)
7581 {
7582 set_syntax_error (_("invalid addressing mode"));
7583 goto failure;
7584 }
7585 info->qualifier = base_qualifier;
7586 if (!info->addr.offset.is_reg || info->addr.pcrel
7587 || !info->addr.preind || info->addr.writeback
7588 || info->shifter.operator_present != 0)
7589 {
7590 set_syntax_error (_("invalid addressing mode"));
7591 goto failure;
7592 }
7593 info->shifter.kind = AARCH64_MOD_LSL;
7594 break;
7595
7596
7597 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7598 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7599 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7600 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7601 /* [Z<n>.<T>{, #imm}] */
7602 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7603 &offset_qualifier));
7604 if (base_qualifier != AARCH64_OPND_QLF_S_S
7605 && base_qualifier != AARCH64_OPND_QLF_S_D)
7606 {
7607 set_syntax_error (_("invalid addressing mode"));
7608 goto failure;
7609 }
7610 info->qualifier = base_qualifier;
7611 goto sve_regimm;
7612
7613 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7614 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7615 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7616 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7617 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7618
7619 We don't reject:
7620
7621 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7622
7623 here since we get better error messages by leaving it to
7624 the qualifier checking routines. */
7625 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7626 &offset_qualifier));
7627 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7628 && base_qualifier != AARCH64_OPND_QLF_S_D)
7629 || offset_qualifier != base_qualifier)
7630 {
7631 set_syntax_error (_("invalid addressing mode"));
7632 goto failure;
7633 }
7634 info->qualifier = base_qualifier;
7635 goto regoff_addr;
7636
7637 case AARCH64_OPND_SYSREG:
7638 {
7639 uint32_t sysreg_flags;
7640 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7641 &sysreg_flags)) == PARSE_FAIL)
7642 {
7643 set_syntax_error (_("unknown or missing system register name"));
7644 goto failure;
7645 }
7646 inst.base.operands[i].sysreg.value = val;
7647 inst.base.operands[i].sysreg.flags = sysreg_flags;
7648 break;
7649 }
7650
7651 case AARCH64_OPND_PSTATEFIELD:
7652 {
7653 uint32_t sysreg_flags;
7654 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7655 &sysreg_flags)) == PARSE_FAIL)
7656 {
7657 set_syntax_error (_("unknown or missing PSTATE field name"));
7658 goto failure;
7659 }
7660 inst.base.operands[i].pstatefield = val;
7661 inst.base.operands[i].sysreg.flags = sysreg_flags;
7662 break;
7663 }
7664
7665 case AARCH64_OPND_SYSREG_IC:
7666 inst.base.operands[i].sysins_op =
7667 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7668 goto sys_reg_ins;
7669
7670 case AARCH64_OPND_SYSREG_DC:
7671 inst.base.operands[i].sysins_op =
7672 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7673 goto sys_reg_ins;
7674
7675 case AARCH64_OPND_SYSREG_AT:
7676 inst.base.operands[i].sysins_op =
7677 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7678 goto sys_reg_ins;
7679
7680 case AARCH64_OPND_SYSREG_SR:
7681 inst.base.operands[i].sysins_op =
7682 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7683 goto sys_reg_ins;
7684
7685 case AARCH64_OPND_SYSREG_TLBI:
7686 inst.base.operands[i].sysins_op =
7687 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7688 sys_reg_ins:
7689 if (inst.base.operands[i].sysins_op == NULL)
7690 {
7691 set_fatal_syntax_error ( _("unknown or missing operation name"));
7692 goto failure;
7693 }
7694 break;
7695
7696 case AARCH64_OPND_BARRIER:
7697 case AARCH64_OPND_BARRIER_ISB:
7698 val = parse_barrier (&str);
7699 if (val != PARSE_FAIL
7700 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7701 {
7702 /* ISB only accepts options name 'sy'. */
7703 set_syntax_error
7704 (_("the specified option is not accepted in ISB"));
7705 /* Turn off backtrack as this optional operand is present. */
7706 backtrack_pos = 0;
7707 goto failure;
7708 }
7709 if (val != PARSE_FAIL
7710 && operands[i] == AARCH64_OPND_BARRIER)
7711 {
7712 /* Regular barriers accept options CRm (C0-C15).
7713 DSB nXS barrier variant accepts values > 15. */
7714 if (val < 0 || val > 15)
7715 {
7716 set_syntax_error (_("the specified option is not accepted in DSB"));
7717 goto failure;
7718 }
7719 }
7720 /* This is an extension to accept a 0..15 immediate. */
7721 if (val == PARSE_FAIL)
7722 po_imm_or_fail (0, 15);
7723 info->barrier = aarch64_barrier_options + val;
7724 break;
7725
7726 case AARCH64_OPND_BARRIER_DSB_NXS:
7727 val = parse_barrier (&str);
7728 if (val != PARSE_FAIL)
7729 {
7730 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7731 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7732 {
7733 set_syntax_error (_("the specified option is not accepted in DSB"));
7734 /* Turn off backtrack as this optional operand is present. */
7735 backtrack_pos = 0;
7736 goto failure;
7737 }
7738 }
7739 else
7740 {
7741 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7742 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7743 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7744 goto failure;
7745 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7746 {
7747 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7748 goto failure;
7749 }
7750 }
7751 /* Option index is encoded as 2-bit value in val<3:2>. */
7752 val = (val >> 2) - 4;
7753 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7754 break;
7755
7756 case AARCH64_OPND_PRFOP:
7757 val = parse_pldop (&str);
7758 /* This is an extension to accept a 0..31 immediate. */
7759 if (val == PARSE_FAIL)
7760 po_imm_or_fail (0, 31);
7761 inst.base.operands[i].prfop = aarch64_prfops + val;
7762 break;
7763
7764 case AARCH64_OPND_RPRFMOP:
7765 po_enum_or_fail (aarch64_rprfmop_array);
7766 info->imm.value = val;
7767 break;
7768
7769 case AARCH64_OPND_BARRIER_PSB:
7770 val = parse_barrier_psb (&str, &(info->hint_option));
7771 if (val == PARSE_FAIL)
7772 goto failure;
7773 break;
7774
7775 case AARCH64_OPND_SME_ZT0:
7776 po_reg_or_fail (REG_TYPE_ZT0);
7777 break;
7778
7779 case AARCH64_OPND_SME_ZT0_INDEX:
7780 reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
7781 if (!reg || vectype.type != NT_invtype)
7782 goto failure;
7783 if (!(vectype.defined & NTA_HASINDEX))
7784 {
7785 set_syntax_error (_("missing register index"));
7786 goto failure;
7787 }
7788 info->imm.value = vectype.index;
7789 break;
7790
7791 case AARCH64_OPND_SME_ZT0_LIST:
7792 if (*str != '{')
7793 {
7794 set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
7795 goto failure;
7796 }
7797 str++;
7798 if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
7799 goto failure;
7800 if (*str != '}')
7801 {
7802 set_syntax_error (_("expected '}' after ZT0"));
7803 goto failure;
7804 }
7805 str++;
7806 break;
7807
7808 case AARCH64_OPND_SME_PNn3_INDEX1:
7809 case AARCH64_OPND_SME_PNn3_INDEX2:
7810 reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
7811 if (!reg)
7812 goto failure;
7813 if (!(vectype.defined & NTA_HASINDEX))
7814 {
7815 set_syntax_error (_("missing register index"));
7816 goto failure;
7817 }
7818 info->reglane.regno = reg->number;
7819 info->reglane.index = vectype.index;
7820 if (vectype.type == NT_invtype)
7821 info->qualifier = AARCH64_OPND_QLF_NIL;
7822 else
7823 info->qualifier = vectype_to_qualifier (&vectype);
7824 break;
7825
7826 case AARCH64_OPND_BTI_TARGET:
7827 val = parse_bti_operand (&str, &(info->hint_option));
7828 if (val == PARSE_FAIL)
7829 goto failure;
7830 break;
7831
7832 case AARCH64_OPND_SME_ZAda_2b:
7833 case AARCH64_OPND_SME_ZAda_3b:
7834 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7835 if (!reg)
7836 goto failure;
7837 info->reg.regno = reg->number;
7838 info->qualifier = qualifier;
7839 break;
7840
7841 case AARCH64_OPND_SME_ZA_HV_idx_src:
7842 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
7843 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7844 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
7845 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7846 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7847 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7848 &info->indexed_za,
7849 &qualifier)
7850 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7851 &info->indexed_za, &qualifier, 0))
7852 goto failure;
7853 info->qualifier = qualifier;
7854 break;
7855
7856 case AARCH64_OPND_SME_list_of_64bit_tiles:
7857 val = parse_sme_list_of_64bit_tiles (&str);
7858 if (val == PARSE_FAIL)
7859 goto failure;
7860 info->imm.value = val;
7861 break;
7862
7863 case AARCH64_OPND_SME_ZA_array_off1x4:
7864 case AARCH64_OPND_SME_ZA_array_off2x2:
7865 case AARCH64_OPND_SME_ZA_array_off2x4:
7866 case AARCH64_OPND_SME_ZA_array_off3_0:
7867 case AARCH64_OPND_SME_ZA_array_off3_5:
7868 case AARCH64_OPND_SME_ZA_array_off3x2:
7869 case AARCH64_OPND_SME_ZA_array_off4:
7870 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7871 &info->indexed_za, &qualifier, 0))
7872 goto failure;
7873 info->qualifier = qualifier;
7874 break;
7875
7876 case AARCH64_OPND_SME_VLxN_10:
7877 case AARCH64_OPND_SME_VLxN_13:
7878 po_strict_enum_or_fail (aarch64_sme_vlxn_array);
7879 info->imm.value = val;
7880 break;
7881
7882 case AARCH64_OPND_MOPS_ADDR_Rd:
7883 case AARCH64_OPND_MOPS_ADDR_Rs:
7884 po_char_or_fail ('[');
7885 if (!parse_x0_to_x30 (&str, info))
7886 goto failure;
7887 po_char_or_fail (']');
7888 po_char_or_fail ('!');
7889 break;
7890
7891 case AARCH64_OPND_MOPS_WB_Rn:
7892 if (!parse_x0_to_x30 (&str, info))
7893 goto failure;
7894 po_char_or_fail ('!');
7895 break;
7896
7897 default:
7898 as_fatal (_("unhandled operand code %d"), operands[i]);
7899 }
7900
7901 /* If we get here, this operand was successfully parsed. */
7902 inst.base.operands[i].present = 1;
7903 continue;
7904
7905 failure:
7906 /* The parse routine should already have set the error, but in case
7907 not, set a default one here. */
7908 if (! error_p ())
7909 set_default_error ();
7910
7911 if (! backtrack_pos)
7912 goto parse_operands_return;
7913
7914 {
7915 /* We reach here because this operand is marked as optional, and
7916 either no operand was supplied or the operand was supplied but it
7917 was syntactically incorrect. In the latter case we report an
7918 error. In the former case we perform a few more checks before
7919 dropping through to the code to insert the default operand. */
7920
7921 char *tmp = backtrack_pos;
7922 char endchar = END_OF_INSN;
7923
7924 if (i != (aarch64_num_of_operands (opcode) - 1))
7925 endchar = ',';
7926 skip_past_char (&tmp, ',');
7927
7928 if (*tmp != endchar)
7929 /* The user has supplied an operand in the wrong format. */
7930 goto parse_operands_return;
7931
7932 /* Make sure there is not a comma before the optional operand.
7933 For example the fifth operand of 'sys' is optional:
7934
7935 sys #0,c0,c0,#0, <--- wrong
7936 sys #0,c0,c0,#0 <--- correct. */
7937 if (comma_skipped_p && i && endchar == END_OF_INSN)
7938 {
7939 set_fatal_syntax_error
7940 (_("unexpected comma before the omitted optional operand"));
7941 goto parse_operands_return;
7942 }
7943 }
7944
7945 /* Reaching here means we are dealing with an optional operand that is
7946 omitted from the assembly line. */
7947 gas_assert (optional_operand_p (opcode, i));
7948 info->present = 0;
7949 process_omitted_operand (operands[i], opcode, i, info);
7950
7951 /* Try again, skipping the optional operand at backtrack_pos. */
7952 str = backtrack_pos;
7953 backtrack_pos = 0;
7954
7955 /* Clear any error record after the omitted optional operand has been
7956 successfully handled. */
7957 clear_error ();
7958 }
7959
7960 /* Check if we have parsed all the operands. */
7961 if (*str != '\0' && ! error_p ())
7962 {
7963 /* Set I to the index of the last present operand; this is
7964 for the purpose of diagnostics. */
7965 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7966 ;
7967 set_fatal_syntax_error
7968 (_("unexpected characters following instruction"));
7969 }
7970
7971 parse_operands_return:
7972
7973 if (error_p ())
7974 {
7975 inst.parsing_error.index = i;
7976 DEBUG_TRACE ("parsing FAIL: %s - %s",
7977 operand_mismatch_kind_names[inst.parsing_error.kind],
7978 inst.parsing_error.error);
7979 /* Record the operand error properly; this is useful when there
7980 are multiple instruction templates for a mnemonic name, so that
7981 later on, we can select the error that most closely describes
7982 the problem. */
7983 record_operand_error_info (opcode, &inst.parsing_error);
7984 return false;
7985 }
7986 else
7987 {
7988 DEBUG_TRACE ("parsing SUCCESS");
7989 return true;
7990 }
7991 }
7992
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A Wn register can only name bits 0-31; reject larger bit
	     numbers, then canonicalize to the architectural Xn form that
	     libopcodes expects.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the 64-bit
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
8100
/* Check for loads and stores that will cause unpredictable behavior.
   These checks only warn; the instruction is still assembled.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair instructions the address operand is operand 2.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes loads from stores here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 distinguishes load-exclusive from store-exclusive;
	     bit 21 distinguishes pair from single forms.  */
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /*  Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
8196
8197 static void
8198 force_automatic_sequence_close (void)
8199 {
8200 struct aarch64_segment_info_type *tc_seg_info;
8201
8202 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8203 if (tc_seg_info->insn_sequence.instr)
8204 {
8205 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8206 _("previous `%s' sequence has not been closed"),
8207 tc_seg_info->insn_sequence.instr->opcode->name);
8208 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8209 }
8210 }
8211
8212 /* A wrapper function to interface with libopcodes on encoding and
8213 record the error message if there is any.
8214
8215 Return TRUE on success; otherwise return FALSE. */
8216
8217 static bool
8218 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8219 aarch64_insn *code)
8220 {
8221 aarch64_operand_error error_info;
8222 memset (&error_info, '\0', sizeof (error_info));
8223 error_info.kind = AARCH64_OPDE_NIL;
8224 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8225 && !error_info.non_fatal)
8226 return true;
8227
8228 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8229 record_operand_error_info (opcode, &error_info);
8230 return error_info.non_fatal;
8231 }
8232
8233 #ifdef DEBUG_AARCH64
8234 static inline void
8235 dump_opcode_operands (const aarch64_opcode *opcode)
8236 {
8237 int i = 0;
8238 while (opcode->operands[i] != AARCH64_OPND_NIL)
8239 {
8240 aarch64_verbose ("\t\t opnd%d: %s", i,
8241 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
8242 ? aarch64_get_operand_name (opcode->operands[i])
8243 : aarch64_get_operand_desc (opcode->operands[i]));
8244 ++i;
8245 }
8246 }
8247 #endif /* DEBUG_AARCH64 */
8248
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' for condition-code
     suffix handling (e.g. b.cond).  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A dot-less name may be a "name .req reg" style alias definition
     rather than an instruction.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Reset the per-instruction state but keep the condition code parsed
     from the mnemonic suffix.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; try the next one for the same mnemonic,
	 starting from a clean instruction state.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8403
8404 /* Various frobbings of labels and their addresses. */
8405
/* Called at the start of every assembly line; forget the label seen on
   the previous line so md_assemble only aligns a label from the current
   line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8411
/* Called whenever a label is defined.  Record it for md_assemble's
   label alignment and emit DWARF line information for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8419
/* Called when a section change occurs.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8426
8427 int
8428 aarch64_data_in_code (void)
8429 {
8430 if (startswith (input_line_pointer + 1, "data:"))
8431 {
8432 *input_line_pointer = '/';
8433 input_line_pointer += 5;
8434 *input_line_pointer = 0;
8435 return 1;
8436 }
8437
8438 return 0;
8439 }
8440
/* Canonicalize NAME by stripping a trailing "/data" marker (appended by
   aarch64_data_in_code) in place.  Returns NAME.

   A bare "/data" (length exactly 5) is left untouched: the check is
   len > 5, so only a non-empty prefix is ever truncated.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the strlen result; an int could misbehave on
     pathologically long names.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8451 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF entries are canonical names; REGDEF_ALIAS entries (false flag)
   are alternative spellings of a register defined elsewhere.  REGSET16
   covers numbers 0-15, REGSET31 covers 0-30, and REGSET adds 31.  The
   *S variants append a fixed suffix to each name (used for ZA tile
   slices below).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Number 31 is excluded here: its spelling is
     context-dependent (sp/wsp vs xzr/wzr), handled by the entries
     below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
  REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, V), REGSET (V, V),

  /* SVE vector registers.  */
  REGSET (z, Z), REGSET (Z, Z),

  /* SVE predicate(-as-mask) registers.  */
  REGSET16 (p, P), REGSET16 (P, P),

  /* SVE predicate-as-counter registers.  */
  REGSET16 (pn, PN), REGSET16 (PN, PN),

  /* SME ZA.  We model this as a register because it acts syntactically
     like ZA0H, supporting qualifier suffixes and indexing.  */
  REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),

  /* SME2 ZT0.  */
  REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
};
/* Undefine all of the helper macros above, including REGNUMS and
   REGSET16S which were previously left defined and leaked into the rest
   of the file.  */
#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGNUMS
#undef REGSET16
#undef REGSET16S
#undef REGSET31
#undef REGSET
8546
8547 #define N 1
8548 #define n 0
8549 #define Z 1
8550 #define z 0
8551 #define C 1
8552 #define c 0
8553 #define V 1
8554 #define v 0
8555 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
8556 static const asm_nzcv nzcv_names[] = {
8557 {"nzcv", B (n, z, c, v)},
8558 {"nzcV", B (n, z, c, V)},
8559 {"nzCv", B (n, z, C, v)},
8560 {"nzCV", B (n, z, C, V)},
8561 {"nZcv", B (n, Z, c, v)},
8562 {"nZcV", B (n, Z, c, V)},
8563 {"nZCv", B (n, Z, C, v)},
8564 {"nZCV", B (n, Z, C, V)},
8565 {"Nzcv", B (N, z, c, v)},
8566 {"NzcV", B (N, z, c, V)},
8567 {"NzCv", B (N, z, C, v)},
8568 {"NzCV", B (N, z, C, V)},
8569 {"NZcv", B (N, Z, c, v)},
8570 {"NZcV", B (N, Z, c, V)},
8571 {"NZCv", B (N, Z, C, v)},
8572 {"NZCV", B (N, Z, C, V)}
8573 };
8574
8575 #undef N
8576 #undef n
8577 #undef Z
8578 #undef z
8579 #undef C
8580 #undef c
8581 #undef V
8582 #undef v
8583 #undef B
8584 \f
8585 /* MD interface: bits in the object file. */
8586
8587 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8588 for use in the a.out file, and stores them in the array pointed to by buf.
8589 This knows about the endian-ness of the target machine and does
8590 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8591 2 (short) and 4 (long) Floating numbers are put out as a series of
8592 LITTLENUMS (shorts, here at least). */
8593
8594 void
8595 md_number_to_chars (char *buf, valueT val, int n)
8596 {
8597 if (target_big_endian)
8598 number_to_chars_bigendian (buf, val, n);
8599 else
8600 number_to_chars_littleendian (buf, val, n);
8601 }
8602
8603 /* MD interface: Sections. */
8604
8605 /* Estimate the size of a frag before relaxing. Assume everything fits in
8606 4 bytes. */
8607
8608 int
8609 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8610 {
8611 fragp->fr_var = 4;
8612 return 4;
8613 }
8614
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No rounding is applied; the size is returned unchanged.  */
  return size;
}
8622
8623 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8624 of an rs_align_code fragment.
8625
8626 Here we fill the frag with the appropriate info for padding the
8627 output stream. The resulting frag will consist of a fixed (fr_fix)
8628 and of a repeating (fr_var) part.
8629
8630 The fixed content is always emitted before the repeating content and
8631 these two parts are used as follows in constructing the output:
8632 - the fixed part will be used to align to a valid instruction word
8633 boundary, in case that we start at a misaligned address; as no
8634 executable instruction can live at the misaligned location, we
8635 simply fill with zeros;
8636 - the variable part will be used to cover the remaining padding and
8637 we fill using the AArch64 NOP instruction.
8638
8639 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8640 enough storage space for up to 3 bytes for padding the back to a valid
8641 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8642
8643 void
8644 aarch64_handle_align (fragS * fragP)
8645 {
8646 /* NOP = d503201f */
8647 /* AArch64 instructions are always little-endian. */
8648 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
8649
8650 int bytes, fix, noop_size;
8651 char *p;
8652
8653 if (fragP->fr_type != rs_align_code)
8654 return;
8655
8656 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
8657 p = fragP->fr_literal + fragP->fr_fix;
8658
8659 #ifdef OBJ_ELF
8660 gas_assert (fragP->tc_frag_data.recorded);
8661 #endif
8662
8663 noop_size = sizeof (aarch64_noop);
8664
8665 fix = bytes & (noop_size - 1);
8666 if (fix)
8667 {
8668 #if defined OBJ_ELF || defined OBJ_COFF
8669 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
8670 #endif
8671 memset (p, 0, fix);
8672 p += fix;
8673 fragP->fr_fix += fix;
8674 }
8675
8676 if (noop_size)
8677 memcpy (p, aarch64_noop, noop_size);
8678 fragP->fr_var = noop_size;
8679 }
8680
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do here; only the ELF variant (below) records mapping
     state for the frag.  */
}
8693
8694 #else /* OBJ_ELF is defined. */
8695 void
8696 aarch64_init_frag (fragS * fragP, int max_chars)
8697 {
8698 /* Record a mapping symbol for alignment frags. We will delete this
8699 later if the alignment ends up empty. */
8700 if (!fragP->tc_frag_data.recorded)
8701 fragP->tc_frag_data.recorded = 1;
8702
8703 /* PR 21809: Do not set a mapping state for debug sections
8704 - it just confuses other tools. */
8705 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8706 return;
8707
8708 switch (fragP->fr_type)
8709 {
8710 case rs_align_test:
8711 case rs_fill:
8712 mapping_state_2 (MAP_DATA, max_chars);
8713 break;
8714 case rs_align:
8715 /* PR 20364: We can get alignment frags in code sections,
8716 so do not just assume that we should use the MAP_DATA state. */
8717 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8718 break;
8719 case rs_align_code:
8720 mapping_state_2 (MAP_INSN, max_chars);
8721 break;
8722 default:
8723 break;
8724 }
8725 }
8726
8727 /* Whether SFrame stack trace info is supported. */
8728
8729 bool
8730 aarch64_support_sframe_p (void)
8731 {
8732 /* At this time, SFrame is supported for aarch64 only. */
8733 return (aarch64_abi == AARCH64_ABI_LP64);
8734 }
8735
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* The return address register is always tracked for SFrame on
     AArch64.  */
  return true;
}
8743
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* RA tracking is always enabled (see aarch64_sframe_ra_tracking_p),
     so there is no valid fixed offset to report.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8752
8753 /* Get the abi/arch indentifier for SFrame. */
8754
8755 unsigned char
8756 aarch64_sframe_get_abi_arch (void)
8757 {
8758 unsigned char sframe_abi_arch = 0;
8759
8760 if (aarch64_support_sframe_p ())
8761 {
8762 sframe_abi_arch = target_big_endian
8763 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8764 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8765 }
8766
8767 return sframe_abi_arch;
8768 }
8769
8770 #endif /* OBJ_ELF */
8771 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* Initial CFA rule: CFA = SP + 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8779
8780 /* Convert REGNAME to a DWARF-2 register number. */
8781
8782 int
8783 tc_aarch64_regname_to_dw2regnum (char *regname)
8784 {
8785 const reg_entry *reg = parse_reg (&regname);
8786 if (reg == NULL)
8787 return -1;
8788
8789 switch (reg->type)
8790 {
8791 case REG_TYPE_SP_32:
8792 case REG_TYPE_SP_64:
8793 case REG_TYPE_R_32:
8794 case REG_TYPE_R_64:
8795 return reg->number;
8796
8797 case REG_TYPE_FP_B:
8798 case REG_TYPE_FP_H:
8799 case REG_TYPE_FP_S:
8800 case REG_TYPE_FP_D:
8801 case REG_TYPE_FP_Q:
8802 return reg->number + 64;
8803
8804 default:
8805 break;
8806 }
8807 return -1;
8808 }
8809
8810 /* Implement DWARF2_ADDR_SIZE. */
8811
8812 int
8813 aarch64_dwarf2_addr_size (void)
8814 {
8815 if (ilp32_p)
8816 return 4;
8817 else if (llp64_p)
8818 return 8;
8819 return bfd_arch_bits_per_address (stdoutput) / 8;
8820 }
8821
8822 /* MD interface: Symbol and relocation handling. */
8823
8824 /* Return the address within the segment that a PC-relative fixup is
8825 relative to. For AArch64 PC-relative fixups applied to instructions
8826 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8827
8828 long
8829 md_pcrel_from_section (fixS * fixP, segT seg)
8830 {
8831 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8832
8833 /* If this is pc-relative and we are going to emit a relocation
8834 then we just want to put out any pipeline compensation that the linker
8835 will need. Otherwise we want to use the calculated base. */
8836 if (fixP->fx_pcrel
8837 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8838 || aarch64_force_relocation (fixP)))
8839 base = 0;
8840
8841 /* AArch64 should be consistent for all pc-relative relocations. */
8842 return base + AARCH64_PCREL_OFFSET;
8843 }
8844
8845 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8846 Otherwise we have no need to default values of symbols. */
8847
8848 symbolS *
8849 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8850 {
8851 #ifdef OBJ_ELF
8852 if (name[0] == '_' && name[1] == 'G'
8853 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8854 {
8855 if (!GOT_symbol)
8856 {
8857 if (symbol_find (name))
8858 as_bad (_("GOT already in the symbol table"));
8859
8860 GOT_symbol = symbol_new (name, undefined_section,
8861 &zero_address_frag, 0);
8862 }
8863
8864 return GOT_symbol;
8865 }
8866 #endif
8867
8868 return 0;
8869 }
8870
8871 /* Return non-zero if the indicated VALUE has overflowed the maximum
8872 range expressible by a unsigned number with the indicated number of
8873 BITS. */
8874
8875 static bool
8876 unsigned_overflow (valueT value, unsigned bits)
8877 {
8878 valueT lim;
8879 if (bits >= sizeof (valueT) * 8)
8880 return false;
8881 lim = (valueT) 1 << bits;
8882 return (value >= lim);
8883 }
8884
8885
8886 /* Return non-zero if the indicated VALUE has overflowed the maximum
8887 range expressible by an signed number with the indicated number of
8888 BITS. */
8889
8890 static bool
8891 signed_overflow (offsetT value, unsigned bits)
8892 {
8893 offsetT lim;
8894 if (bits >= sizeof (offsetT) * 8)
8895 return false;
8896 lim = (offsetT) 1 << (bits - 1);
8897 return (value < -lim || value >= lim);
8898 }
8899
/* Given an instruction in *INSTR, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled, unsigned-offset opcode onto its unscaled,
     signed-offset counterpart; OP_NIL marks "no counterpart".  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  /* The address operand of every opcode mapped above is operand 1.  */
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8962
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  /* Install the now-resolved immediate and unmark it as skipped.  */
  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first, so that a reversible encoding is
	 preferred per the contract above.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  (OP_MOV_IMM_WIDEN is the inverted-immediate
	 counterpart of OP_MOV_IMM_WIDE; the contract above only permits
	 MOVZ, MOVN or ORR here.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask-immediate form), the last resort.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
9023
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* BUF points at the instruction's bytes within the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate of e.g. SVC/UDF; patched directly into the
	 insn word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		   3  322|2222|2 2 2 21111 111111
		   1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		   3  322|2222|2 2  221111111111
		   1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode through the opcode table rather
	 than patching bits directly.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled, unsigned form does not fit, fall
	 back to the unscaled LDUR/STUR form where one exists.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9202
9203 /* Apply a fixup (fixP) to segment data, once it has been determined
9204 by our caller that we have all the info we need to fix it up.
9205
9206 Parameter valP is the pointer to the value of the bits. */
9207
9208 void
9209 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
9210 {
9211 offsetT value = *valP;
9212 uint32_t insn;
9213 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
9214 int scale;
9215 unsigned flags = fixP->fx_addnumber;
9216
9217 DEBUG_TRACE ("\n\n");
9218 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
9219 DEBUG_TRACE ("Enter md_apply_fix");
9220
9221 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
9222
9223 /* Note whether this will delete the relocation. */
9224
9225 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
9226 && aarch64_force_reloc (fixP->fx_r_type) <= 0)
9227 fixP->fx_done = 1;
9228
9229 /* Process the relocations. */
9230 switch (fixP->fx_r_type)
9231 {
9232 case BFD_RELOC_NONE:
9233 /* This will need to go in the object file. */
9234 fixP->fx_done = 0;
9235 break;
9236
9237 case BFD_RELOC_8:
9238 case BFD_RELOC_8_PCREL:
9239 if (fixP->fx_done || !seg->use_rela_p)
9240 md_number_to_chars (buf, value, 1);
9241 break;
9242
9243 case BFD_RELOC_16:
9244 case BFD_RELOC_16_PCREL:
9245 if (fixP->fx_done || !seg->use_rela_p)
9246 md_number_to_chars (buf, value, 2);
9247 break;
9248
9249 case BFD_RELOC_32:
9250 case BFD_RELOC_32_PCREL:
9251 if (fixP->fx_done || !seg->use_rela_p)
9252 md_number_to_chars (buf, value, 4);
9253 break;
9254
9255 case BFD_RELOC_64:
9256 case BFD_RELOC_64_PCREL:
9257 if (fixP->fx_done || !seg->use_rela_p)
9258 md_number_to_chars (buf, value, 8);
9259 break;
9260
9261 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
9262 /* We claim that these fixups have been processed here, even if
9263 in fact we generate an error because we do not have a reloc
9264 for them, so tc_gen_reloc() will reject them. */
9265 fixP->fx_done = 1;
9266 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
9267 {
9268 as_bad_where (fixP->fx_file, fixP->fx_line,
9269 _("undefined symbol %s used as an immediate value"),
9270 S_GET_NAME (fixP->fx_addsy));
9271 goto apply_fix_return;
9272 }
9273 fix_insn (fixP, flags, value);
9274 break;
9275
9276 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
9277 if (fixP->fx_done || !seg->use_rela_p)
9278 {
9279 if (value & 3)
9280 as_bad_where (fixP->fx_file, fixP->fx_line,
9281 _("pc-relative load offset not word aligned"));
9282 if (signed_overflow (value, 21))
9283 as_bad_where (fixP->fx_file, fixP->fx_line,
9284 _("pc-relative load offset out of range"));
9285 insn = get_aarch64_insn (buf);
9286 insn |= encode_ld_lit_ofs_19 (value >> 2);
9287 put_aarch64_insn (buf, insn);
9288 }
9289 break;
9290
9291 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
9292 if (fixP->fx_done || !seg->use_rela_p)
9293 {
9294 if (signed_overflow (value, 21))
9295 as_bad_where (fixP->fx_file, fixP->fx_line,
9296 _("pc-relative address offset out of range"));
9297 insn = get_aarch64_insn (buf);
9298 insn |= encode_adr_imm (value);
9299 put_aarch64_insn (buf, insn);
9300 }
9301 break;
9302
9303 case BFD_RELOC_AARCH64_BRANCH19:
9304 if (fixP->fx_done || !seg->use_rela_p)
9305 {
9306 if (value & 3)
9307 as_bad_where (fixP->fx_file, fixP->fx_line,
9308 _("conditional branch target not word aligned"));
9309 if (signed_overflow (value, 21))
9310 as_bad_where (fixP->fx_file, fixP->fx_line,
9311 _("conditional branch out of range"));
9312 insn = get_aarch64_insn (buf);
9313 insn |= encode_cond_branch_ofs_19 (value >> 2);
9314 put_aarch64_insn (buf, insn);
9315 }
9316 break;
9317
9318 case BFD_RELOC_AARCH64_TSTBR14:
9319 if (fixP->fx_done || !seg->use_rela_p)
9320 {
9321 if (value & 3)
9322 as_bad_where (fixP->fx_file, fixP->fx_line,
9323 _("conditional branch target not word aligned"));
9324 if (signed_overflow (value, 16))
9325 as_bad_where (fixP->fx_file, fixP->fx_line,
9326 _("conditional branch out of range"));
9327 insn = get_aarch64_insn (buf);
9328 insn |= encode_tst_branch_ofs_14 (value >> 2);
9329 put_aarch64_insn (buf, insn);
9330 }
9331 break;
9332
9333 case BFD_RELOC_AARCH64_CALL26:
9334 case BFD_RELOC_AARCH64_JUMP26:
9335 if (fixP->fx_done || !seg->use_rela_p)
9336 {
9337 if (value & 3)
9338 as_bad_where (fixP->fx_file, fixP->fx_line,
9339 _("branch target not word aligned"));
9340 if (signed_overflow (value, 28))
9341 as_bad_where (fixP->fx_file, fixP->fx_line,
9342 _("branch out of range"));
9343 insn = get_aarch64_insn (buf);
9344 insn |= encode_branch_ofs_26 (value >> 2);
9345 put_aarch64_insn (buf, insn);
9346 }
9347 break;
9348
9349 case BFD_RELOC_AARCH64_MOVW_G0:
9350 case BFD_RELOC_AARCH64_MOVW_G0_NC:
9351 case BFD_RELOC_AARCH64_MOVW_G0_S:
9352 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9353 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9354 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
9355 scale = 0;
9356 goto movw_common;
9357 case BFD_RELOC_AARCH64_MOVW_G1:
9358 case BFD_RELOC_AARCH64_MOVW_G1_NC:
9359 case BFD_RELOC_AARCH64_MOVW_G1_S:
9360 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9361 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9362 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
9363 scale = 16;
9364 goto movw_common;
9365 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9366 scale = 0;
9367 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9368 /* Should always be exported to object file, see
9369 aarch64_force_relocation(). */
9370 gas_assert (!fixP->fx_done);
9371 gas_assert (seg->use_rela_p);
9372 goto movw_common;
9373 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9374 scale = 16;
9375 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9376 /* Should always be exported to object file, see
9377 aarch64_force_relocation(). */
9378 gas_assert (!fixP->fx_done);
9379 gas_assert (seg->use_rela_p);
9380 goto movw_common;
9381 case BFD_RELOC_AARCH64_MOVW_G2:
9382 case BFD_RELOC_AARCH64_MOVW_G2_NC:
9383 case BFD_RELOC_AARCH64_MOVW_G2_S:
9384 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9385 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
9386 scale = 32;
9387 goto movw_common;
9388 case BFD_RELOC_AARCH64_MOVW_G3:
9389 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9390 scale = 48;
9391 movw_common:
9392 if (fixP->fx_done || !seg->use_rela_p)
9393 {
9394 insn = get_aarch64_insn (buf);
9395
9396 if (!fixP->fx_done)
9397 {
9398 /* REL signed addend must fit in 16 bits */
9399 if (signed_overflow (value, 16))
9400 as_bad_where (fixP->fx_file, fixP->fx_line,
9401 _("offset out of range"));
9402 }
9403 else
9404 {
9405 /* Check for overflow and scale. */
9406 switch (fixP->fx_r_type)
9407 {
9408 case BFD_RELOC_AARCH64_MOVW_G0:
9409 case BFD_RELOC_AARCH64_MOVW_G1:
9410 case BFD_RELOC_AARCH64_MOVW_G2:
9411 case BFD_RELOC_AARCH64_MOVW_G3:
9412 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9413 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9414 if (unsigned_overflow (value, scale + 16))
9415 as_bad_where (fixP->fx_file, fixP->fx_line,
9416 _("unsigned value out of range"));
9417 break;
9418 case BFD_RELOC_AARCH64_MOVW_G0_S:
9419 case BFD_RELOC_AARCH64_MOVW_G1_S:
9420 case BFD_RELOC_AARCH64_MOVW_G2_S:
9421 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9422 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9423 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9424 /* NOTE: We can only come here with movz or movn. */
9425 if (signed_overflow (value, scale + 16))
9426 as_bad_where (fixP->fx_file, fixP->fx_line,
9427 _("signed value out of range"));
9428 if (value < 0)
9429 {
9430 /* Force use of MOVN. */
9431 value = ~value;
9432 insn = reencode_movzn_to_movn (insn);
9433 }
9434 else
9435 {
9436 /* Force use of MOVZ. */
9437 insn = reencode_movzn_to_movz (insn);
9438 }
9439 break;
9440 default:
9441 /* Unchecked relocations. */
9442 break;
9443 }
9444 value >>= scale;
9445 }
9446
9447 /* Insert value into MOVN/MOVZ/MOVK instruction. */
9448 insn |= encode_movw_imm (value & 0xffff);
9449
9450 put_aarch64_insn (buf, insn);
9451 }
9452 break;
9453
9454 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9455 fixP->fx_r_type = (ilp32_p
9456 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9457 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9458 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9459 /* Should always be exported to object file, see
9460 aarch64_force_relocation(). */
9461 gas_assert (!fixP->fx_done);
9462 gas_assert (seg->use_rela_p);
9463 break;
9464
9465 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9466 fixP->fx_r_type = (ilp32_p
9467 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9468 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9469 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9470 /* Should always be exported to object file, see
9471 aarch64_force_relocation(). */
9472 gas_assert (!fixP->fx_done);
9473 gas_assert (seg->use_rela_p);
9474 break;
9475
9476 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9477 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9478 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9479 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9480 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9481 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9482 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9483 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9484 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9485 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9486 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9487 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9488 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9489 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9490 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9491 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9492 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9493 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9494 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9495 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9496 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9497 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9498 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9499 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9500 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9501 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9502 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9503 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9504 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9505 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9506 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9507 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9508 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9509 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9510 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9511 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9512 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9513 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9514 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9515 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9516 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9517 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9518 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9519 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9520 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9521 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9522 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9523 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9524 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9525 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9526 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9527 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9528 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9529 /* Should always be exported to object file, see
9530 aarch64_force_relocation(). */
9531 gas_assert (!fixP->fx_done);
9532 gas_assert (seg->use_rela_p);
9533 break;
9534
9535 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9536 /* Should always be exported to object file, see
9537 aarch64_force_relocation(). */
9538 fixP->fx_r_type = (ilp32_p
9539 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9540 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9541 gas_assert (!fixP->fx_done);
9542 gas_assert (seg->use_rela_p);
9543 break;
9544
9545 case BFD_RELOC_AARCH64_ADD_LO12:
9546 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9547 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9548 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9549 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9550 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9551 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9552 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9553 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9554 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9555 case BFD_RELOC_AARCH64_LDST128_LO12:
9556 case BFD_RELOC_AARCH64_LDST16_LO12:
9557 case BFD_RELOC_AARCH64_LDST32_LO12:
9558 case BFD_RELOC_AARCH64_LDST64_LO12:
9559 case BFD_RELOC_AARCH64_LDST8_LO12:
9560 /* Should always be exported to object file, see
9561 aarch64_force_relocation(). */
9562 gas_assert (!fixP->fx_done);
9563 gas_assert (seg->use_rela_p);
9564 break;
9565
9566 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9567 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9568 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9569 break;
9570
9571 case BFD_RELOC_UNUSED:
9572 /* An error will already have been reported. */
9573 break;
9574
9575 case BFD_RELOC_RVA:
9576 case BFD_RELOC_32_SECREL:
9577 case BFD_RELOC_16_SECIDX:
9578 break;
9579
9580 default:
9581 as_bad_where (fixP->fx_file, fixP->fx_line,
9582 _("unexpected %s fixup"),
9583 bfd_get_reloc_code_name (fixP->fx_r_type));
9584 break;
9585 }
9586
9587 apply_fix_return:
  /* Free the struct aarch64_inst that was allocated for this fix-up.
     N.B. currently only a very limited number of fix-up types actually use
     this field, so the impact on performance should be minimal.  */
9591 free (fixP->tc_fix_data.inst);
9592
9593 return;
9594 }
9595
9596 /* Translate internal representation of relocation info to BFD target
9597 format. */
9598
9599 arelent *
9600 tc_gen_reloc (asection * section, fixS * fixp)
9601 {
9602 arelent *reloc;
9603 bfd_reloc_code_real_type code;
9604
9605 reloc = XNEW (arelent);
9606
9607 reloc->sym_ptr_ptr = XNEW (asymbol *);
9608 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9609 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9610
9611 if (fixp->fx_pcrel)
9612 {
9613 if (section->use_rela_p)
9614 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9615 else
9616 fixp->fx_offset = reloc->address;
9617 }
9618 reloc->addend = fixp->fx_offset;
9619
9620 code = fixp->fx_r_type;
9621 switch (code)
9622 {
9623 case BFD_RELOC_16:
9624 if (fixp->fx_pcrel)
9625 code = BFD_RELOC_16_PCREL;
9626 break;
9627
9628 case BFD_RELOC_32:
9629 if (fixp->fx_pcrel)
9630 code = BFD_RELOC_32_PCREL;
9631 break;
9632
9633 case BFD_RELOC_64:
9634 if (fixp->fx_pcrel)
9635 code = BFD_RELOC_64_PCREL;
9636 break;
9637
9638 default:
9639 break;
9640 }
9641
9642 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9643 if (reloc->howto == NULL)
9644 {
9645 as_bad_where (fixp->fx_file, fixp->fx_line,
9646 _
9647 ("cannot represent %s relocation in this object file format"),
9648 bfd_get_reloc_code_name (code));
9649 return NULL;
9650 }
9651
9652 return reloc;
9653 }
9654
/* This fix_new is called by cons via TC_CONS_FIX_NEW.

   Create a fix-up for a data directive of SIZE bytes at offset WHERE in
   FRAG, for the expression EXP.  The relocation type is chosen purely
   from SIZE (with PE-specific handling for @secrel/@secidx operators).  */

void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data directives are never PC-relative here.  */
  int pcrel = 0;

#ifdef TE_PE
  /* On PE, the O_secrel/O_secidx operators select section-relative
     relocations regardless of SIZE; rewrite the op to O_symbol so the
     generic fix-up machinery treats the operand as a plain symbol.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9704
9705 /* Implement md_after_parse_args. This is the earliest time we need to decide
9706 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9707
9708 void
9709 aarch64_after_parse_args (void)
9710 {
9711 if (aarch64_abi != AARCH64_ABI_NONE)
9712 return;
9713
9714 #ifdef OBJ_ELF
9715 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9716 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9717 aarch64_abi = AARCH64_ABI_ILP32;
9718 else
9719 aarch64_abi = AARCH64_ABI_LP64;
9720 #else
9721 aarch64_abi = AARCH64_ABI_LLP64;
9722 #endif
9723 }
9724
9725 #ifdef OBJ_ELF
9726 const char *
9727 elf64_aarch64_target_format (void)
9728 {
9729 #ifdef TE_CLOUDABI
9730 /* FIXME: What to do for ilp32_p ? */
9731 if (target_big_endian)
9732 return "elf64-bigaarch64-cloudabi";
9733 else
9734 return "elf64-littleaarch64-cloudabi";
9735 #else
9736 if (target_big_endian)
9737 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9738 else
9739 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9740 #endif
9741 }
9742
/* Thin wrapper forwarding the per-symbol output hook to the generic
   ELF implementation.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9748 #elif defined OBJ_COFF
/* Return the (little-endian only) BFD target name for PE/COFF output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9754 #endif
9755
9756 /* MD interface: Finalization. */
9757
9758 /* A good place to do this, although this was probably not intended
9759 for this kind of use. We need to dump the literal pool before
9760 references are made to a null symbol pointer. */
9761
9762 void
9763 aarch64_cleanup (void)
9764 {
9765 literal_pool *pool;
9766
9767 for (pool = list_of_pools; pool; pool = pool->next)
9768 {
9769 /* Put it at the end of the relevant section. */
9770 subseg_set (pool->section, pool->sub_section);
9771 s_ltorg (0);
9772 }
9773 }
9774
9775 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections; ABFD and DUMMY are
   unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary to NEXT; scan forward over any
	 empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9839 #endif
9840
/* Adjust the symbol table.  For ELF, drop redundant mapping symbols
   created around alignment frags before running the generic ELF
   adjustments; a no-op for other object formats.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9853
/* Insert (KEY, VALUE) into TABLE.  The final 0 argument requests that
   an existing entry for KEY is left in place rather than replaced
   (NOTE(review): confirm against str_hash_insert in hash.h).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9859
/* Like checked_hash_insert, but additionally assert that KEY fits in
   the fixed-size buffers used for system register names.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9866
9867 static void
9868 fill_instruction_hash_table (void)
9869 {
9870 const aarch64_opcode *opcode = aarch64_opcode_table;
9871
9872 while (opcode->name != NULL)
9873 {
9874 templates *templ, *new_templ;
9875 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9876
9877 new_templ = XNEW (templates);
9878 new_templ->opcode = opcode;
9879 new_templ->next = NULL;
9880
9881 if (!templ)
9882 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9883 else
9884 {
9885 new_templ->next = templ->next;
9886 templ->next = new_templ;
9887 }
9888 ++opcode;
9889 }
9890 }
9891
9892 static inline void
9893 convert_to_upper (char *dst, const char *src, size_t num)
9894 {
9895 unsigned int i;
9896 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9897 *dst = TOUPPER (*src);
9898 *dst = '\0';
9899 }
9900
9901 /* Assume STR point to a lower-case string, allocate, convert and return
9902 the corresponding upper-case string. */
9903 static inline const char*
9904 get_upper_str (const char *str)
9905 {
9906 char *ret;
9907 size_t len = strlen (str);
9908 ret = XNEWVEC (char, len + 1);
9909 convert_to_upper (ret, str, len);
9910 return ret;
9911 }
9912
/* MD interface: Initialization.

   Build every lookup table the parser needs (mnemonics, condition
   codes, shifts, system registers, barriers, prefetch and hint
   operands), resolve the CPU feature set from -mcpu/-march, and record
   the BFD machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables first; they are filled below.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* The system-register style tables are all NULL-name terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			 (void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			 aarch64_pstatefields[i].name,
			 (void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			 aarch64_sys_regs_ic[i].name,
			 (void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			 aarch64_sys_regs_dc[i].name,
			 (void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			 aarch64_sys_regs_at[i].name,
			 (void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			 aarch64_sys_regs_tlbi[i].name,
			 (void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			 aarch64_sys_regs_sr[i].name,
			 (void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Operand modifiers and condition codes are entered both in lower
     and upper case so either spelling assembles.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  -mcpu wins
     over -march; with neither, fall back to the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
10084
/* Command line processing.  */

/* Single-letter options: only "-m<suffix>", dispatched in
   md_parse_option.  */
const char *md_shortopts = "m:";

/* Define OPTION_EB/OPTION_EL only for the endiannesses this build can
   actually produce; bi-endian builds get both.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options recognised by getopt; -EB/-EL select output
   endianness.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
10111
/* Simple boolean command-line options: each sets *VAR to VALUE when
   the option name matches exactly.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
10134
/* Table entry mapping a -mcpu= name to its baseline feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL name entry.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ALL_FEATURES, NULL},
  {"cortex-a34", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A76AE"},
  {"cortex-a77", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A77"},
  {"cortex-a65", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A65"},
  {"cortex-a65ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A65AE"},
  {"cortex-a78", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				       SSBS, PROFILE), "Cortex-A78"},
  {"cortex-a78ae", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
					 SSBS, PROFILE), "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_CPU_FEATURES (V8_2A, 7, DOTPROD, F16, FLAGM,
					PAC, PROFILE, RCPC, SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A510"},
  {"cortex-a520", AARCH64_CPU_FEATURES (V9_2A, 2, MEMTAG, SVE2_BITPERM),
   "Cortex-A520"},
  {"cortex-a710", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A710"},
  {"cortex-a720", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
					SVE2_BITPERM), "Cortex-A720"},
  {"ares", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
				 PROFILE), "Ares"},
  {"exynos-m1", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					SSBS), "Neoverse E1"},
  {"neoverse-n1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					PROFILE), "Neoverse N1"},
  {"neoverse-n2", AARCH64_CPU_FEATURES (V8_5A, 8, BFLOAT16, I8MM, F16,
					SVE, SVE2, SVE2_BITPERM, MEMTAG,
					RNG), "Neoverse N2"},
  {"neoverse-v1", AARCH64_CPU_FEATURES (V8_4A, 8, PROFILE, CVADP, SVE,
					SSBS, RNG, F16, BFLOAT16, I8MM),
   "Neoverse V1"},
  {"qdf24xx", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_CPU_FEATURES (V8_4A, 3, SHA2, AES, PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_CPU_FEATURES (V8_1A, 2, SHA2, AES),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene2", AARCH64_CPU_FEATURES (V8A, 1, CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_FEATURES (V8R), "Cortex-R82"},
  {"cortex-x1", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				      SSBS, PROFILE), "Cortex-X1"},
  {"cortex-x2", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X2"},
  {"generic", AARCH64_ARCH_FEATURES (V8A), NULL},

  {NULL, AARCH64_NO_FEATURES, NULL}
};
10222
/* Table entry mapping a -march= name to its architecture feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL name entry.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ALL_FEATURES},
  {"armv8-a", AARCH64_ARCH_FEATURES (V8A)},
  {"armv8.1-a", AARCH64_ARCH_FEATURES (V8_1A)},
  {"armv8.2-a", AARCH64_ARCH_FEATURES (V8_2A)},
  {"armv8.3-a", AARCH64_ARCH_FEATURES (V8_3A)},
  {"armv8.4-a", AARCH64_ARCH_FEATURES (V8_4A)},
  {"armv8.5-a", AARCH64_ARCH_FEATURES (V8_5A)},
  {"armv8.6-a", AARCH64_ARCH_FEATURES (V8_6A)},
  {"armv8.7-a", AARCH64_ARCH_FEATURES (V8_7A)},
  {"armv8.8-a", AARCH64_ARCH_FEATURES (V8_8A)},
  {"armv8-r",	AARCH64_ARCH_FEATURES (V8R)},
  {"armv9-a",	AARCH64_ARCH_FEATURES (V9A)},
  {"armv9.1-a",	AARCH64_ARCH_FEATURES (V9_1A)},
  {"armv9.2-a",	AARCH64_ARCH_FEATURES (V9_2A)},
  {"armv9.3-a",	AARCH64_ARCH_FEATURES (V9_3A)},
  {NULL, AARCH64_NO_FEATURES}
};
10249
/* ISA extensions.  */

/* Table entry mapping a "+ext" / "+noext" modifier name to the feature
   bits it toggles and the features it depends on; REQUIRE drives the
   transitive enable/disable closures below.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Terminated by a NULL name entry.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (CRC), AARCH64_NO_FEATURES},
  {"crypto",		AARCH64_FEATURES (2, AES, SHA2),
			AARCH64_FEATURE (SIMD)},
  {"fp",		AARCH64_FEATURE (FP), AARCH64_NO_FEATURES},
  {"lse",		AARCH64_FEATURE (LSE), AARCH64_NO_FEATURES},
  {"simd",		AARCH64_FEATURE (SIMD), AARCH64_FEATURE (FP)},
  {"pan",		AARCH64_FEATURE (PAN), AARCH64_NO_FEATURES},
  {"lor",		AARCH64_FEATURE (LOR), AARCH64_NO_FEATURES},
  {"ras",		AARCH64_FEATURE (RAS), AARCH64_NO_FEATURES},
  {"rdma",		AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"fp16",		AARCH64_FEATURE (F16), AARCH64_FEATURE (FP)},
  {"fp16fml",		AARCH64_FEATURE (F16_FML), AARCH64_FEATURE (F16)},
  {"profile",		AARCH64_FEATURE (PROFILE), AARCH64_NO_FEATURES},
  {"sve",		AARCH64_FEATURE (SVE), AARCH64_FEATURE (COMPNUM)},
  {"tme",		AARCH64_FEATURE (TME), AARCH64_NO_FEATURES},
  {"compnum",		AARCH64_FEATURE (COMPNUM),
			AARCH64_FEATURES (2, F16, SIMD)},
  {"rcpc",		AARCH64_FEATURE (RCPC), AARCH64_NO_FEATURES},
  {"dotprod",		AARCH64_FEATURE (DOTPROD), AARCH64_FEATURE (SIMD)},
  {"sha2",		AARCH64_FEATURE (SHA2), AARCH64_FEATURE (FP)},
  {"sb",		AARCH64_FEATURE (SB), AARCH64_NO_FEATURES},
  {"predres",		AARCH64_FEATURE (PREDRES), AARCH64_NO_FEATURES},
  {"aes",		AARCH64_FEATURE (AES), AARCH64_FEATURE (SIMD)},
  {"sm4",		AARCH64_FEATURE (SM4), AARCH64_FEATURE (SIMD)},
  {"sha3",		AARCH64_FEATURE (SHA3), AARCH64_FEATURE (SHA2)},
  {"rng",		AARCH64_FEATURE (RNG), AARCH64_NO_FEATURES},
  {"ssbs",		AARCH64_FEATURE (SSBS), AARCH64_NO_FEATURES},
  {"memtag",		AARCH64_FEATURE (MEMTAG), AARCH64_NO_FEATURES},
  {"sve2",		AARCH64_FEATURE (SVE2), AARCH64_FEATURE (SVE)},
  {"sve2-sm4",		AARCH64_FEATURE (SVE2_SM4),
			AARCH64_FEATURES (2, SVE2, SM4)},
  {"sve2-aes",		AARCH64_FEATURE (SVE2_AES),
			AARCH64_FEATURES (2, SVE2, AES)},
  {"sve2-sha3",		AARCH64_FEATURE (SVE2_SHA3),
			AARCH64_FEATURES (2, SVE2, SHA3)},
  {"sve2-bitperm",	AARCH64_FEATURE (SVE2_BITPERM),
			AARCH64_FEATURE (SVE2)},
  {"sme",		AARCH64_FEATURE (SME),
			AARCH64_FEATURES (2, SVE2, BFLOAT16)},
  {"sme-f64",		AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-f64f64",	AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-i64",		AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme-i16i64",	AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme2",		AARCH64_FEATURE (SME2), AARCH64_FEATURE (SME)},
  {"bf16",		AARCH64_FEATURE (BFLOAT16), AARCH64_FEATURE (FP)},
  {"i8mm",		AARCH64_FEATURE (I8MM), AARCH64_FEATURE (SIMD)},
  {"f32mm",		AARCH64_FEATURE (F32MM), AARCH64_FEATURE (SVE)},
  {"f64mm",		AARCH64_FEATURE (F64MM), AARCH64_FEATURE (SVE)},
  {"ls64",		AARCH64_FEATURE (LS64), AARCH64_NO_FEATURES},
  {"flagm",		AARCH64_FEATURE (FLAGM), AARCH64_NO_FEATURES},
  {"pauth",		AARCH64_FEATURE (PAC), AARCH64_NO_FEATURES},
  {"mops",		AARCH64_FEATURE (MOPS), AARCH64_NO_FEATURES},
  {"hbc",		AARCH64_FEATURE (HBC), AARCH64_NO_FEATURES},
  {"cssc",		AARCH64_FEATURE (CSSC), AARCH64_NO_FEATURES},
  {NULL,		AARCH64_NO_FEATURES, AARCH64_NO_FEATURES},
};
10315
/* Long options of the form "-mxxx=<subopt>"; FUNC parses the part
   after the matched OPTION substring.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10323
10324 /* Transitive closure of features depending on set. */
10325 static aarch64_feature_set
10326 aarch64_feature_disable_set (aarch64_feature_set set)
10327 {
10328 const struct aarch64_option_cpu_value_table *opt;
10329 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10330
10331 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10332 {
10333 prev = set;
10334 for (opt = aarch64_features; opt->name != NULL; opt++)
10335 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10336 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10337 }
10338 return set;
10339 }
10340
10341 /* Transitive closure of dependencies of set. */
10342 static aarch64_feature_set
10343 aarch64_feature_enable_set (aarch64_feature_set set)
10344 {
10345 const struct aarch64_option_cpu_value_table *opt;
10346 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10347
10348 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10349 {
10350 prev = set;
10351 for (opt = aarch64_features; opt->name != NULL; opt++)
10352 if (AARCH64_CPU_HAS_ALL_FEATURES (set, opt->value))
10353 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10354 }
10355 return set;
10356 }
10357
10358 static int
10359 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10360 bool ext_only)
10361 {
10362 /* We insist on extensions being added before being removed. We achieve
10363 this by using the ADDING_VALUE variable to indicate whether we are
10364 adding an extension (1) or removing it (0) and only allowing it to
10365 change in the order -1 -> 1 -> 0. */
10366 int adding_value = -1;
10367 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10368
10369 /* Copy the feature set, so that we can modify it. */
10370 *ext_set = **opt_p;
10371 *opt_p = ext_set;
10372
10373 while (str != NULL && *str != 0)
10374 {
10375 const struct aarch64_option_cpu_value_table *opt;
10376 const char *ext = NULL;
10377 int optlen;
10378
10379 if (!ext_only)
10380 {
10381 if (*str != '+')
10382 {
10383 as_bad (_("invalid architectural extension"));
10384 return 0;
10385 }
10386
10387 ext = strchr (++str, '+');
10388 }
10389
10390 if (ext != NULL)
10391 optlen = ext - str;
10392 else
10393 optlen = strlen (str);
10394
10395 if (optlen >= 2 && startswith (str, "no"))
10396 {
10397 if (adding_value != 0)
10398 adding_value = 0;
10399 optlen -= 2;
10400 str += 2;
10401 }
10402 else if (optlen > 0)
10403 {
10404 if (adding_value == -1)
10405 adding_value = 1;
10406 else if (adding_value != 1)
10407 {
10408 as_bad (_("must specify extensions to add before specifying "
10409 "those to remove"));
10410 return false;
10411 }
10412 }
10413
10414 if (optlen == 0)
10415 {
10416 as_bad (_("missing architectural extension"));
10417 return 0;
10418 }
10419
10420 gas_assert (adding_value != -1);
10421
10422 for (opt = aarch64_features; opt->name != NULL; opt++)
10423 if (strncmp (opt->name, str, optlen) == 0)
10424 {
10425 aarch64_feature_set set;
10426
10427 /* Add or remove the extension. */
10428 if (adding_value)
10429 {
10430 set = aarch64_feature_enable_set (opt->value);
10431 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10432 }
10433 else
10434 {
10435 set = aarch64_feature_disable_set (opt->value);
10436 AARCH64_CLEAR_FEATURES (*ext_set, *ext_set, set);
10437 }
10438 break;
10439 }
10440
10441 if (opt->name == NULL)
10442 {
10443 as_bad (_("unknown architectural extension `%s'"), str);
10444 return 0;
10445 }
10446
10447 str = ext;
10448 };
10449
10450 return 1;
10451 }
10452
10453 static int
10454 aarch64_parse_cpu (const char *str)
10455 {
10456 const struct aarch64_cpu_option_table *opt;
10457 const char *ext = strchr (str, '+');
10458 size_t optlen;
10459
10460 if (ext != NULL)
10461 optlen = ext - str;
10462 else
10463 optlen = strlen (str);
10464
10465 if (optlen == 0)
10466 {
10467 as_bad (_("missing cpu name `%s'"), str);
10468 return 0;
10469 }
10470
10471 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10472 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10473 {
10474 mcpu_cpu_opt = &opt->value;
10475 if (ext != NULL)
10476 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10477
10478 return 1;
10479 }
10480
10481 as_bad (_("unknown cpu `%s'"), str);
10482 return 0;
10483 }
10484
10485 static int
10486 aarch64_parse_arch (const char *str)
10487 {
10488 const struct aarch64_arch_option_table *opt;
10489 const char *ext = strchr (str, '+');
10490 size_t optlen;
10491
10492 if (ext != NULL)
10493 optlen = ext - str;
10494 else
10495 optlen = strlen (str);
10496
10497 if (optlen == 0)
10498 {
10499 as_bad (_("missing architecture name `%s'"), str);
10500 return 0;
10501 }
10502
10503 for (opt = aarch64_archs; opt->name != NULL; opt++)
10504 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10505 {
10506 march_cpu_opt = &opt->value;
10507 if (ext != NULL)
10508 return aarch64_parse_features (ext, &march_cpu_opt, false);
10509
10510 return 1;
10511 }
10512
10513 as_bad (_("unknown architecture `%s'\n"), str);
10514 return 0;
10515 }
10516
/* ABIs.  Maps a -mabi= argument string to its internal ABI code.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as written on the command line.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI identifier.  */
};
10523
/* Recognized -mabi= values.  ELF targets accept ilp32 and lp64;
   non-ELF targets accept llp64 only.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10532
10533 static int
10534 aarch64_parse_abi (const char *str)
10535 {
10536 unsigned int i;
10537
10538 if (str[0] == '\0')
10539 {
10540 as_bad (_("missing abi name `%s'"), str);
10541 return 0;
10542 }
10543
10544 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10545 if (strcmp (str, aarch64_abis[i].name) == 0)
10546 {
10547 aarch64_abi = aarch64_abis[i].value;
10548 return 1;
10549 }
10550
10551 as_bad (_("unknown abi `%s'\n"), str);
10552 return 0;
10553 }
10554
/* Long-form command-line options that take an argument.  For each entry
   the parser function is invoked with the text following the option
   prefix (see md_parse_option).  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10564
10565 int
10566 md_parse_option (int c, const char *arg)
10567 {
10568 struct aarch64_option_table *opt;
10569 struct aarch64_long_option_table *lopt;
10570
10571 switch (c)
10572 {
10573 #ifdef OPTION_EB
10574 case OPTION_EB:
10575 target_big_endian = 1;
10576 break;
10577 #endif
10578
10579 #ifdef OPTION_EL
10580 case OPTION_EL:
10581 target_big_endian = 0;
10582 break;
10583 #endif
10584
10585 case 'a':
10586 /* Listing option. Just ignore these, we don't support additional
10587 ones. */
10588 return 0;
10589
10590 default:
10591 for (opt = aarch64_opts; opt->option != NULL; opt++)
10592 {
10593 if (c == opt->option[0]
10594 && ((arg == NULL && opt->option[1] == 0)
10595 || streq (arg, opt->option + 1)))
10596 {
10597 /* If the option is deprecated, tell the user. */
10598 if (opt->deprecated != NULL)
10599 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10600 arg ? arg : "", _(opt->deprecated));
10601
10602 if (opt->var != NULL)
10603 *opt->var = opt->value;
10604
10605 return 1;
10606 }
10607 }
10608
10609 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10610 {
10611 /* These options are expected to have an argument. */
10612 if (c == lopt->option[0]
10613 && arg != NULL
10614 && startswith (arg, lopt->option + 1))
10615 {
10616 /* If the option is deprecated, tell the user. */
10617 if (lopt->deprecated != NULL)
10618 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10619 _(lopt->deprecated));
10620
10621 /* Call the sup-option parser. */
10622 return lopt->func (arg + strlen (lopt->option) - 1);
10623 }
10624 }
10625
10626 return 0;
10627 }
10628
10629 return 1;
10630 }
10631
10632 void
10633 md_show_usage (FILE * fp)
10634 {
10635 struct aarch64_option_table *opt;
10636 struct aarch64_long_option_table *lopt;
10637
10638 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10639
10640 for (opt = aarch64_opts; opt->option != NULL; opt++)
10641 if (opt->help != NULL)
10642 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10643
10644 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10645 if (lopt->help != NULL)
10646 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10647
10648 #ifdef OPTION_EB
10649 fprintf (fp, _("\
10650 -EB assemble code for a big-endian cpu\n"));
10651 #endif
10652
10653 #ifdef OPTION_EL
10654 fprintf (fp, _("\
10655 -EL assemble code for a little-endian cpu\n"));
10656 #endif
10657 }
10658
10659 /* Parse a .cpu directive. */
10660
10661 static void
10662 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10663 {
10664 const struct aarch64_cpu_option_table *opt;
10665 char saved_char;
10666 char *name;
10667 char *ext;
10668 size_t optlen;
10669
10670 name = input_line_pointer;
10671 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10672 saved_char = *input_line_pointer;
10673 *input_line_pointer = 0;
10674
10675 ext = strchr (name, '+');
10676
10677 if (ext != NULL)
10678 optlen = ext - name;
10679 else
10680 optlen = strlen (name);
10681
10682 /* Skip the first "all" entry. */
10683 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10684 if (strlen (opt->name) == optlen
10685 && strncmp (name, opt->name, optlen) == 0)
10686 {
10687 mcpu_cpu_opt = &opt->value;
10688 if (ext != NULL)
10689 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10690 return;
10691
10692 cpu_variant = *mcpu_cpu_opt;
10693
10694 *input_line_pointer = saved_char;
10695 demand_empty_rest_of_line ();
10696 return;
10697 }
10698 as_bad (_("unknown cpu `%s'"), name);
10699 *input_line_pointer = saved_char;
10700 ignore_rest_of_line ();
10701 }
10702
10703
10704 /* Parse a .arch directive. */
10705
10706 static void
10707 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10708 {
10709 const struct aarch64_arch_option_table *opt;
10710 char saved_char;
10711 char *name;
10712 char *ext;
10713 size_t optlen;
10714
10715 name = input_line_pointer;
10716 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10717 saved_char = *input_line_pointer;
10718 *input_line_pointer = 0;
10719
10720 ext = strchr (name, '+');
10721
10722 if (ext != NULL)
10723 optlen = ext - name;
10724 else
10725 optlen = strlen (name);
10726
10727 /* Skip the first "all" entry. */
10728 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10729 if (strlen (opt->name) == optlen
10730 && strncmp (name, opt->name, optlen) == 0)
10731 {
10732 mcpu_cpu_opt = &opt->value;
10733 if (ext != NULL)
10734 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10735 return;
10736
10737 cpu_variant = *mcpu_cpu_opt;
10738
10739 *input_line_pointer = saved_char;
10740 demand_empty_rest_of_line ();
10741 return;
10742 }
10743
10744 as_bad (_("unknown architecture `%s'\n"), name);
10745 *input_line_pointer = saved_char;
10746 ignore_rest_of_line ();
10747 }
10748
10749 /* Parse a .arch_extension directive. */
10750
10751 static void
10752 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10753 {
10754 char saved_char;
10755 char *ext = input_line_pointer;
10756
10757 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10758 saved_char = *input_line_pointer;
10759 *input_line_pointer = 0;
10760
10761 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10762 return;
10763
10764 cpu_variant = *mcpu_cpu_opt;
10765
10766 *input_line_pointer = saved_char;
10767 demand_empty_rest_of_line ();
10768 }
10769
10770 /* Copy symbol information. */
10771
/* Copy the AArch64-specific flag bits of symbol SRC onto symbol DEST,
   overwriting DEST's previous flags.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10777
10778 #ifdef OBJ_ELF
10779 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10780 This is needed so AArch64 specific st_other values can be independently
10781 specified for an IFUNC resolver (that is called by the dynamic linker)
10782 and the symbol it resolves (aliased to the resolver). In particular,
10783 if a function symbol has special st_other value set via directives,
10784 then attaching an IFUNC resolver to that symbol should not override
10785 the st_other setting. Requiring the directive on the IFUNC resolver
10786 symbol would be unexpected and problematic in C code, where the two
10787 symbols appear as two independent function declarations. */
10788
10789 void
10790 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10791 {
10792 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10793 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10794 /* If size is unset, copy size from src. Because we don't track whether
10795 .size has been used, we can't differentiate .size dest, 0 from the case
10796 where dest's size is unset. */
10797 if (!destelf->size && S_GET_SIZE (dest) == 0)
10798 {
10799 if (srcelf->size)
10800 {
10801 destelf->size = XNEW (expressionS);
10802 *destelf->size = *srcelf->size;
10803 }
10804 S_SET_SIZE (dest, S_GET_SIZE (src));
10805 }
10806 }
10807 #endif