/* [AArch64][SVE 27/32] Add SVE integer immediate operands
   (binutils-gdb.git, opcodes/aarch64-opc.c)  */
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand, indexed by the encoding.  Entries left unset (null)
   denote reserved encodings.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  /* Encodings 14-28 are reserved.  */
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand, indexed by the encoding.  Entries left unset (null)
   denote reserved encodings.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  /* Encodings 6 and 7 are reserved.  */
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
  /* Encodings 14 and 15 are reserved.  */
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of an AdvSIMD qualifier sequence; used (via
   significant_operand_index) to decide which operand carries the
   size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
129
/* For each data pattern, the index of the operand whose qualifier
   determines the size:Q encoding.  Indexed by enum data_pattern;
   the order here must stay in sync with that enumeration.  */
static const char significant_operand_index [] =
{
  0, /* DP_UNKNOWN, by default using operand 0. */
  0, /* DP_VECTOR_3SAME */
  1, /* DP_VECTOR_LONG */
  2, /* DP_VECTOR_WIDE */
  1, /* DP_VECTOR_ACROSS_LANES */
};
138
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.
   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.

   Note: aarch64_get_qualifier_esize asserts that its argument is an
   operand-variant qualifier, so the esize calls below are deliberately
   placed after the vector_qualifier_p checks that guarantee this.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. v.4s, v.4s, v.4s
	 or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b.
	 or v.4s, v.4h, v.h[2].
	 or v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Description of the instruction bitfields used by the encoder/decoder.
   Each entry is { lsb, width } within the 32-bit instruction word, as
   the per-entry bit-range comments below show; the order of the entries
   must match the field enumeration they are indexed by (declared in
   aarch64-opc.h).  */
const aarch64_field fields[] =
{
  { 0, 0 },	/* NIL. */
  { 0, 4 },	/* cond2: condition in truly conditional-executed inst. */
  { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field. */
  { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
  { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate. */
  { 5, 19 },	/* imm19: e.g. in CBZ. */
  { 5, 19 },	/* immhi: e.g. in ADRP. */
  { 29, 2 },	/* immlo: e.g. in ADRP. */
  { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions. */
  { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst. */
  { 29, 1 },	/* op: in AdvSIMD modified immediate instructions. */
  { 30, 1 },	/* Q: in most AdvSIMD instructions. */
  { 0, 5 },	/* Rt: in load/store instructions. */
  { 0, 5 },	/* Rd: in many integer instructions. */
  { 5, 5 },	/* Rn: in many integer instructions. */
  { 10, 5 },	/* Rt2: in load/store pair instructions. */
  { 10, 5 },	/* Ra: in fp instructions. */
  { 5, 3 },	/* op2: in the system instructions. */
  { 8, 4 },	/* CRm: in the system instructions. */
  { 12, 4 },	/* CRn: in the system instructions. */
  { 16, 3 },	/* op1: in the system instructions. */
  { 19, 2 },	/* op0: in the system instructions. */
  { 10, 3 },	/* imm3: in add/sub extended reg instructions. */
  { 12, 4 },	/* cond: condition flags as a source operand. */
  { 12, 4 },	/* opcode: in advsimd load/store instructions. */
  { 12, 4 },	/* cmode: in advsimd modified immediate instructions. */
  { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element. */
  { 13, 2 },	/* len: in advsimd tbl/tbx instructions. */
  { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst. */
  { 16, 5 },	/* Rs: in load/store exclusive instructions. */
  { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst. */
  { 12, 1 },	/* S: in load/store reg offset instructions. */
  { 21, 2 },	/* hw: in move wide constant instructions. */
  { 22, 2 },	/* opc: in load/store reg offset instructions. */
  { 23, 1 },	/* opc1: in load/store reg offset instructions. */
  { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions. */
  { 22, 2 },	/* type: floating point type field in fp data inst. */
  { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst. */
  { 10, 6 },	/* imm6: in add/sub reg shifted instructions. */
  { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions. */
  { 16, 5 },	/* imm5: in conditional compare (immediate) instructions. */
  { 15, 7 },	/* imm7: in load/store pair pre/post index instructions. */
  { 13, 8 },	/* imm8: in floating-point scalar move immediate inst. */
  { 12, 9 },	/* imm9: in load/store pre/post index instructions. */
  { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst. */
  { 5, 14 },	/* imm14: in test bit and branch instructions. */
  { 5, 16 },	/* imm16: in exception instructions. */
  { 0, 26 },	/* imm26: in unconditional branch instructions. */
  { 10, 6 },	/* imms: in bitfield and logical immediate instructions. */
  { 16, 6 },	/* immr: in bitfield and logical immediate instructions. */
  { 16, 3 },	/* immb: in advsimd shift by immediate instructions. */
  { 19, 4 },	/* immh: in advsimd shift by immediate instructions. */
  { 22, 1 },	/* N: in logical (immediate) instructions. */
  { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index. */
  { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index. */
  { 31, 1 },	/* sf: in integer data processing instructions. */
  { 30, 1 },	/* lse_size: in LSE extension atomic instructions. */
  { 11, 1 },	/* H: in advsimd scalar x indexed element instructions. */
  { 21, 1 },	/* L: in advsimd scalar x indexed element instructions. */
  { 20, 1 },	/* M: in advsimd scalar x indexed element instructions. */
  { 31, 1 },	/* b5: in the test bit and branch instructions. */
  { 19, 5 },	/* b40: in the test bit and branch instructions. */
  { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst. */
  { 17, 1 },	/* SVE_N: SVE equivalent of N. */
  { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0]. */
  { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10]. */
  { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5]. */
  { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10]. */
  { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16]. */
  { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16]. */
  { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5]. */
  { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0]. */
  { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5]. */
  { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16]. */
  { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0]. */
  { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5]. */
  { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16]. */
  { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5]. */
  { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0]. */
  { 16, 3 },	/* SVE_imm3: 3-bit immediate field. */
  { 16, 4 },	/* SVE_imm4: 4-bit immediate field. */
  { 5, 5 },	/* SVE_imm5: 5-bit immediate field. */
  { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field. */
  { 16, 6 },	/* SVE_imm6: 6-bit immediate field. */
  { 14, 7 },	/* SVE_imm7: 7-bit immediate field. */
  { 5, 8 },	/* SVE_imm8: 8-bit immediate field. */
  { 5, 9 },	/* SVE_imm9: 9-bit immediate field. */
  { 11, 6 },	/* SVE_immr: SVE equivalent of immr. */
  { 5, 6 },	/* SVE_imms: SVE equivalent of imms. */
  { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR. */
  { 5, 5 },	/* SVE_pattern: vector pattern enumeration. */
  { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
  { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22]. */
  { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14). */
  { 22, 1 }	/* SVE_xs_22: UXTW/SXTW select (bit 22). */
};
300
301 enum aarch64_operand_class
302 aarch64_get_operand_class (enum aarch64_opnd type)
303 {
304 return aarch64_operands[type].op_class;
305 }
306
307 const char *
308 aarch64_get_operand_name (enum aarch64_opnd type)
309 {
310 return aarch64_operands[type].name;
311 }
312
313 /* Get operand description string.
314 This is usually for the diagnosis purpose. */
315 const char *
316 aarch64_get_operand_desc (enum aarch64_opnd type)
317 {
318 return aarch64_operands[type].desc;
319 }
320
/* Table of all conditional affixes.  Each entry pairs the accepted
   spellings with the 4-bit condition encoding (0x0-0xf); the entries
   are in encoding order so the table can be indexed by value (see
   get_cond_from_value).  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
341
342 const aarch64_cond *
343 get_cond_from_value (aarch64_insn value)
344 {
345 assert (value < 16);
346 return &aarch64_conds[(unsigned int) value];
347 }
348
349 const aarch64_cond *
350 get_inverted_cond (const aarch64_cond *cond)
351 {
352 return &aarch64_conds[cond->value ^ 0x1];
353 }
354
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The order must track enum aarch64_modifier_kind (see
   aarch64_get_operand_modifier, which recovers the kind by pointer
   subtraction on this table).  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
380
381 enum aarch64_modifier_kind
382 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
383 {
384 return desc - aarch64_operand_modifiers;
385 }
386
387 aarch64_insn
388 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
389 {
390 return aarch64_operand_modifiers[kind].value;
391 }
392
393 enum aarch64_modifier_kind
394 aarch64_get_operand_modifier_from_value (aarch64_insn value,
395 bfd_boolean extend_p)
396 {
397 if (extend_p == TRUE)
398 return AARCH64_MOD_UXTB + value;
399 else
400 return AARCH64_MOD_LSL - value;
401 }
402
403 bfd_boolean
404 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
405 {
406 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
407 ? TRUE : FALSE;
408 }
409
410 static inline bfd_boolean
411 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
412 {
413 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
414 ? TRUE : FALSE;
415 }
416
/* Barrier option names, indexed by the 4-bit encoding value; encodings
   with no symbolic name are spelled as their immediate ("#0x..").  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh",   0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh",   0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish",   0xb },
  { "#0x0c", 0xc },
  { "ld",    0xd },
  { "st",    0xe },
  { "sy",    0xf },
};
436
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },	/* Terminator.  */
};
449
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   A NULL name marks an encoding with no symbolic spelling.
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
490 \f
491 /* Utilities on value constraint. */
492
/* Return 1 if LOW <= VALUE <= HIGH, 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  if (value > high)
    return 0;
  return 1;
}
498
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
505
/* Return 1 if VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  else
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      return (value >= -lim && value < lim) ? 1 : 0;
    }
}
519
/* Return 1 if VALUE is representable as a WIDTH-bit unsigned field,
   i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  else
    {
      int64_t lim = (int64_t) 1 << width;
      return (value >= 0 && value < lim) ? 1 : 0;
    }
}
533
534 /* Return 1 if OPERAND is SP or WSP. */
535 int
536 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
537 {
538 return ((aarch64_get_operand_class (operand->type)
539 == AARCH64_OPND_CLASS_INT_REG)
540 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
541 && operand->reg.regno == 31);
542 }
543
544 /* Return 1 if OPERAND is XZR or WZP. */
545 int
546 aarch64_zero_register_p (const aarch64_opnd_info *operand)
547 {
548 return ((aarch64_get_operand_class (operand->type)
549 == AARCH64_OPND_CLASS_INT_REG)
550 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
551 && operand->reg.regno == 31);
552 }
553
554 /* Return true if the operand *OPERAND that has the operand code
555 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
556 qualified by the qualifier TARGET. */
557
558 static inline int
559 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
560 aarch64_opnd_qualifier_t target)
561 {
562 switch (operand->qualifier)
563 {
564 case AARCH64_OPND_QLF_W:
565 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
566 return 1;
567 break;
568 case AARCH64_OPND_QLF_X:
569 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
570 return 1;
571 break;
572 case AARCH64_OPND_QLF_WSP:
573 if (target == AARCH64_OPND_QLF_W
574 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
575 return 1;
576 break;
577 case AARCH64_OPND_QLF_SP:
578 if (target == AARCH64_OPND_QLF_X
579 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
580 return 1;
581 break;
582 default:
583 break;
584 }
585
586 return 0;
587 }
588
589 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
590 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
591
592 Return NIL if more than one expected qualifiers are found. */
593
594 aarch64_opnd_qualifier_t
595 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
596 int idx,
597 const aarch64_opnd_qualifier_t known_qlf,
598 int known_idx)
599 {
600 int i, saved_i;
601
602 /* Special case.
603
604 When the known qualifier is NIL, we have to assume that there is only
605 one qualifier sequence in the *QSEQ_LIST and return the corresponding
606 qualifier directly. One scenario is that for instruction
607 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
608 which has only one possible valid qualifier sequence
609 NIL, S_D
610 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
611 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
612
613 Because the qualifier NIL has dual roles in the qualifier sequence:
614 it can mean no qualifier for the operand, or the qualifer sequence is
615 not in use (when all qualifiers in the sequence are NILs), we have to
616 handle this special case here. */
617 if (known_qlf == AARCH64_OPND_NIL)
618 {
619 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
620 return qseq_list[0][idx];
621 }
622
623 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
624 {
625 if (qseq_list[i][known_idx] == known_qlf)
626 {
627 if (saved_i != -1)
628 /* More than one sequences are found to have KNOWN_QLF at
629 KNOWN_IDX. */
630 return AARCH64_OPND_NIL;
631 saved_i = i;
632 }
633 }
634
635 return qseq_list[saved_i][idx];
636 }
637
/* Kind of an operand qualifier description; determines how the three
   data fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Register/vector operand variant.  */
  OQK_VALUE_IN_RANGE,	/* Constrains an immediate to a value range.  */
  OQK_MISC,		/* Miscellaneous (e.g. shift operators).  */
};
645
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     - OQK_OPD_VARIANT: element size, number of elements, common
       encoding value;
     - OQK_VALUE_IN_RANGE: lower bound, upper bound, unused;
     - otherwise: unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
658
659 /* Indexed by the operand qualifier enumerators. */
660 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
661 {
662 {0, 0, 0, "NIL", OQK_NIL},
663
664 /* Operand variant qualifiers.
665 First 3 fields:
666 element size, number of elements and common value for encoding. */
667
668 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
669 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
670 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
671 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
672
673 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
674 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
675 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
676 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
677 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
678
679 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
680 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
681 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
682 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
683 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
684 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
685 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
686 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
687 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
688 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
689
690 {0, 0, 0, "z", OQK_OPD_VARIANT},
691 {0, 0, 0, "m", OQK_OPD_VARIANT},
692
693 /* Qualifiers constraining the value range.
694 First 3 fields:
695 Lower bound, higher bound, unused. */
696
697 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
698 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
699 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
700 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
701 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
702 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
703
704 /* Qualifiers for miscellaneous purpose.
705 First 3 fields:
706 unused, unused and unused. */
707
708 {0, 0, 0, "lsl", 0},
709 {0, 0, 0, "msl", 0},
710
711 {0, 0, 0, "retrieving", 0},
712 };
713
714 static inline bfd_boolean
715 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
716 {
717 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
718 ? TRUE : FALSE;
719 }
720
721 static inline bfd_boolean
722 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
723 {
724 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
725 ? TRUE : FALSE;
726 }
727
728 const char*
729 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
730 {
731 return aarch64_opnd_qualifiers[qualifier].desc;
732 }
733
734 /* Given an operand qualifier, return the expected data element size
735 of a qualified operand. */
736 unsigned char
737 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
738 {
739 assert (operand_variant_qualifier_p (qualifier) == TRUE);
740 return aarch64_opnd_qualifiers[qualifier].data0;
741 }
742
743 unsigned char
744 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
745 {
746 assert (operand_variant_qualifier_p (qualifier) == TRUE);
747 return aarch64_opnd_qualifiers[qualifier].data1;
748 }
749
750 aarch64_insn
751 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
752 {
753 assert (operand_variant_qualifier_p (qualifier) == TRUE);
754 return aarch64_opnd_qualifiers[qualifier].data2;
755 }
756
757 static int
758 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
759 {
760 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
761 return aarch64_opnd_qualifiers[qualifier].data0;
762 }
763
764 static int
765 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
766 {
767 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
768 return aarch64_opnd_qualifiers[qualifier].data1;
769 }
770
771 #ifdef DEBUG_AARCH64
/* printf-style debug trace helper: prefix the message with "#### " and
   terminate it with a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
782
783 static inline void
784 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
785 {
786 int i;
787 printf ("#### \t");
788 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
789 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
790 printf ("\n");
791 }
792
793 static void
794 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
795 const aarch64_opnd_qualifier_t *qualifier)
796 {
797 int i;
798 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
799
800 aarch64_verbose ("dump_match_qualifiers:");
801 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
802 curr[i] = opnd[i].qualifier;
803 dump_qualifier_sequence (curr);
804 aarch64_verbose ("against");
805 dump_qualifier_sequence (qualifier);
806 }
807 #endif /* DEBUG_AARCH64 */
808
809 /* TODO improve this, we can have an extra field at the runtime to
810 store the number of operands rather than calculating it every time. */
811
812 int
813 aarch64_num_of_operands (const aarch64_opcode *opcode)
814 {
815 int i = 0;
816 const enum aarch64_opnd *opnds = opcode->operands;
817 while (opnds[i++] != AARCH64_OPND_NIL)
818 ;
819 --i;
820 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
821 return i;
822 }
823
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* NOTE(review): *RET is left untouched on this path -- confirm
	 callers do not read it for operand-less opcodes.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  QUALIFIERS_LIST is advanced together with I so
     that, on a successful break, it points at the matched sequence.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched sequence up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
948
949 /* Operand qualifier matching and resolving.
950
951 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
952 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
953
954 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
955 succeeds. */
956
957 static int
958 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
959 {
960 int i, nops;
961 aarch64_opnd_qualifier_seq_t qualifiers;
962
963 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
964 qualifiers))
965 {
966 DEBUG_TRACE ("matching FAIL");
967 return 0;
968 }
969
970 if (inst->opcode->flags & F_STRICT)
971 {
972 /* Require an exact qualifier match, even for NIL qualifiers. */
973 nops = aarch64_num_of_operands (inst->opcode);
974 for (i = 0; i < nops; ++i)
975 if (inst->operands[i].qualifier != qualifiers[i])
976 return FALSE;
977 }
978
979 /* Update the qualifiers. */
980 if (update_p == TRUE)
981 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
982 {
983 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
984 break;
985 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
986 "update %s with %s for operand %d",
987 aarch64_get_qualifier_name (inst->operands[i].qualifier),
988 aarch64_get_qualifier_name (qualifiers[i]), i);
989 inst->operands[i].qualifier = qualifiers[i];
990 }
991
992 DEBUG_TRACE ("matching SUCCESS");
993 return 1;
994 }
995
996 /* Return TRUE if VALUE is a wide constant that can be moved into a general
997 register by MOVZ.
998
999 IS32 indicates whether value is a 32-bit immediate or not.
1000 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1001 amount will be returned in *SHIFT_AMOUNT. */
1002
1003 bfd_boolean
1004 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1005 {
1006 int amount;
1007
1008 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1009
1010 if (is32)
1011 {
1012 /* Allow all zeros or all ones in top 32-bits, so that
1013 32-bit constant expressions like ~0x80000000 are
1014 permitted. */
1015 uint64_t ext = value;
1016 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1017 /* Immediate out of range. */
1018 return FALSE;
1019 value &= (int64_t) 0xffffffff;
1020 }
1021
1022 /* first, try movz then movn */
1023 amount = -1;
1024 if ((value & ((int64_t) 0xffff << 0)) == value)
1025 amount = 0;
1026 else if ((value & ((int64_t) 0xffff << 16)) == value)
1027 amount = 16;
1028 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1029 amount = 32;
1030 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1031 amount = 48;
1032
1033 if (amount == -1)
1034 {
1035 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1036 return FALSE;
1037 }
1038
1039 if (shift_amount != NULL)
1040 *shift_amount = amount;
1041
1042 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1043
1044 return TRUE;
1045 }
1046
1047 /* Build the accepted values for immediate logical SIMD instructions.
1048
1049 The standard encodings of the immediate value are:
1050 N imms immr SIMD size R S
1051 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1052 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1053 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1054 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1055 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1056 0 11110s 00000r 2 UInt(r) UInt(s)
1057 where all-ones value of S is reserved.
1058
1059 Let's call E the SIMD size.
1060
1061 The immediate value is: S+1 bits '1' rotated to the right by R.
1062
1063 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1064 (remember S != E - 1). */
1065
/* Number of valid logical-immediate encodings; see the derivation above.  */
#define TOTAL_IMM_NB 5334

/* A decoded bitmask immediate paired with its standard N:immr:imms
   encoding (as laid out by encode_immediate_bitfield).  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid bitmask immediates, built lazily by
   build_immediate_table and sorted by IMM for bsearch lookup.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1075
1076 static int
1077 simd_imm_encoding_cmp(const void *i1, const void *i2)
1078 {
1079 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1080 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1081
1082 if (imm1->imm < imm2->imm)
1083 return -1;
1084 if (imm1->imm > imm2->imm)
1085 return +1;
1086 return 0;
1087 }
1088
1089 /* immediate bitfield standard encoding
1090 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1091 1 ssssss rrrrrr 64 rrrrrr ssssss
1092 0 0sssss 0rrrrr 32 rrrrr sssss
1093 0 10ssss 00rrrr 16 rrrr ssss
1094 0 110sss 000rrr 8 rrr sss
1095 0 1110ss 0000rr 4 rr ss
1096 0 11110s 00000r 2 r s */
/* Pack the standard bitmask-immediate encoding: bit 12 is N (IS64),
   bits 11:6 are immr (R) and bits 5:0 are imms (S).  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1102
/* Fill simd_immediates[] with every valid bitmask immediate and its
   standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e s_mask
	     1 ((1 << 4) - 1) << 2 = 111100
	     2 ((1 << 3) - 1) << 3 = 111000
	     3 ((1 << 2) - 1) << 4 = 110000
	     4 ((1 << 1) - 1) << 5 = 100000
	     5 ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* NOTE: the fall-throughs below are intentional — starting at
	       the case for this element size, each step doubles the
	       pattern until it fills the full 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* The count of generated entries must match the closed-form total.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1163
1164 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1165 be accepted by logical (immediate) instructions
1166 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1167
1168 ESIZE is the number of bytes in the decoded immediate value.
1169 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1170 VALUE will be returned in *ENCODING. */
1171
1172 bfd_boolean
1173 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1174 {
1175 simd_imm_encoding imm_enc;
1176 const simd_imm_encoding *imm_encoding;
1177 static bfd_boolean initialized = FALSE;
1178 uint64_t upper;
1179 int i;
1180
1181 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1182 value, is32);
1183
1184 if (initialized == FALSE)
1185 {
1186 build_immediate_table ();
1187 initialized = TRUE;
1188 }
1189
1190 /* Allow all zeros or all ones in top bits, so that
1191 constant expressions like ~1 are permitted. */
1192 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1193 if ((value & ~upper) != value && (value | upper) != value)
1194 return FALSE;
1195
1196 /* Replicate to a full 64-bit value. */
1197 value &= ~upper;
1198 for (i = esize * 8; i < 64; i *= 2)
1199 value |= (value << i);
1200
1201 imm_enc.imm = value;
1202 imm_encoding = (const simd_imm_encoding *)
1203 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1204 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1205 if (imm_encoding == NULL)
1206 {
1207 DEBUG_TRACE ("exit with FALSE");
1208 return FALSE;
1209 }
1210 if (encoding != NULL)
1211 *encoding = imm_encoding->encoding;
1212 DEBUG_TRACE ("exit with TRUE");
1213 return TRUE;
1214 }
1215
1216 /* If 64-bit immediate IMM is in the format of
1217 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1218 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1219 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int ret = 0;
  int i;

  /* Walk the eight bytes from least to most significant; each must be
     either 0x00 or 0xff, and the 0xff bytes set the corresponding
     result bit.  */
  for (i = 0; i < 8; i++, imm >>= 8)
    {
      uint32_t byte = imm & 0xff;

      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	return -1;
    }
  return ret;
}
1237
1238 /* Utility inline functions for operand_general_constraint_met_p. */
1239
1240 static inline void
1241 set_error (aarch64_operand_error *mismatch_detail,
1242 enum aarch64_operand_error_kind kind, int idx,
1243 const char* error)
1244 {
1245 if (mismatch_detail == NULL)
1246 return;
1247 mismatch_detail->kind = kind;
1248 mismatch_detail->index = idx;
1249 mismatch_detail->error = error;
1250 }
1251
1252 static inline void
1253 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1254 const char* error)
1255 {
1256 if (mismatch_detail == NULL)
1257 return;
1258 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1259 }
1260
1261 static inline void
1262 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1263 int idx, int lower_bound, int upper_bound,
1264 const char* error)
1265 {
1266 if (mismatch_detail == NULL)
1267 return;
1268 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1269 mismatch_detail->data[0] = lower_bound;
1270 mismatch_detail->data[1] = upper_bound;
1271 }
1272
1273 static inline void
1274 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1275 int idx, int lower_bound, int upper_bound)
1276 {
1277 if (mismatch_detail == NULL)
1278 return;
1279 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1280 _("immediate value"));
1281 }
1282
1283 static inline void
1284 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1285 int idx, int lower_bound, int upper_bound)
1286 {
1287 if (mismatch_detail == NULL)
1288 return;
1289 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1290 _("immediate offset"));
1291 }
1292
1293 static inline void
1294 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1295 int idx, int lower_bound, int upper_bound)
1296 {
1297 if (mismatch_detail == NULL)
1298 return;
1299 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1300 _("register number"));
1301 }
1302
1303 static inline void
1304 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1305 int idx, int lower_bound, int upper_bound)
1306 {
1307 if (mismatch_detail == NULL)
1308 return;
1309 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1310 _("register element index"));
1311 }
1312
1313 static inline void
1314 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1315 int idx, int lower_bound, int upper_bound)
1316 {
1317 if (mismatch_detail == NULL)
1318 return;
1319 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1320 _("shift amount"));
1321 }
1322
1323 /* Report that the MUL modifier in operand IDX should be in the range
1324 [LOWER_BOUND, UPPER_BOUND]. */
1325 static inline void
1326 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1327 int idx, int lower_bound, int upper_bound)
1328 {
1329 if (mismatch_detail == NULL)
1330 return;
1331 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1332 _("multiplier"));
1333 }
1334
1335 static inline void
1336 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1337 int alignment)
1338 {
1339 if (mismatch_detail == NULL)
1340 return;
1341 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1342 mismatch_detail->data[0] = alignment;
1343 }
1344
1345 static inline void
1346 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1347 int expected_num)
1348 {
1349 if (mismatch_detail == NULL)
1350 return;
1351 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1352 mismatch_detail->data[0] = expected_num;
1353 }
1354
1355 static inline void
1356 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1357 const char* error)
1358 {
1359 if (mismatch_detail == NULL)
1360 return;
1361 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1362 }
1363
1364 /* General constraint checking based on operand code.
1365
1366 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1367 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1368
1369 This function has to be called after the qualifiers for all operands
1370 have been resolved.
1371
1372 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1373 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1374 of error message during the disassembling where error message is not
1375 wanted. We avoid the dynamic construction of strings of error messages
1376 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1377 use a combination of error code, static string and some integer data to
1378 represent an error. */
1379
1380 static int
1381 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1382 enum aarch64_opnd type,
1383 const aarch64_opcode *opcode,
1384 aarch64_operand_error *mismatch_detail)
1385 {
1386 unsigned num, modifiers, shift;
1387 unsigned char size;
1388 int64_t imm, min_value, max_value;
1389 uint64_t uvalue, mask;
1390 const aarch64_opnd_info *opnd = opnds + idx;
1391 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1392
1393 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1394
1395 switch (aarch64_operands[type].op_class)
1396 {
1397 case AARCH64_OPND_CLASS_INT_REG:
1398 /* Check pair reg constraints for cas* instructions. */
1399 if (type == AARCH64_OPND_PAIRREG)
1400 {
1401 assert (idx == 1 || idx == 3);
1402 if (opnds[idx - 1].reg.regno % 2 != 0)
1403 {
1404 set_syntax_error (mismatch_detail, idx - 1,
1405 _("reg pair must start from even reg"));
1406 return 0;
1407 }
1408 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1409 {
1410 set_syntax_error (mismatch_detail, idx,
1411 _("reg pair must be contiguous"));
1412 return 0;
1413 }
1414 break;
1415 }
1416
1417 /* <Xt> may be optional in some IC and TLBI instructions. */
1418 if (type == AARCH64_OPND_Rt_SYS)
1419 {
1420 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1421 == AARCH64_OPND_CLASS_SYSTEM));
1422 if (opnds[1].present
1423 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1424 {
1425 set_other_error (mismatch_detail, idx, _("extraneous register"));
1426 return 0;
1427 }
1428 if (!opnds[1].present
1429 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1430 {
1431 set_other_error (mismatch_detail, idx, _("missing register"));
1432 return 0;
1433 }
1434 }
1435 switch (qualifier)
1436 {
1437 case AARCH64_OPND_QLF_WSP:
1438 case AARCH64_OPND_QLF_SP:
1439 if (!aarch64_stack_pointer_p (opnd))
1440 {
1441 set_other_error (mismatch_detail, idx,
1442 _("stack pointer register expected"));
1443 return 0;
1444 }
1445 break;
1446 default:
1447 break;
1448 }
1449 break;
1450
1451 case AARCH64_OPND_CLASS_SVE_REG:
1452 switch (type)
1453 {
1454 case AARCH64_OPND_SVE_Zn_INDEX:
1455 size = aarch64_get_qualifier_esize (opnd->qualifier);
1456 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1457 {
1458 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1459 0, 64 / size - 1);
1460 return 0;
1461 }
1462 break;
1463
1464 case AARCH64_OPND_SVE_ZnxN:
1465 case AARCH64_OPND_SVE_ZtxN:
1466 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1467 {
1468 set_other_error (mismatch_detail, idx,
1469 _("invalid register list"));
1470 return 0;
1471 }
1472 break;
1473
1474 default:
1475 break;
1476 }
1477 break;
1478
1479 case AARCH64_OPND_CLASS_PRED_REG:
1480 if (opnd->reg.regno >= 8
1481 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1482 {
1483 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1484 return 0;
1485 }
1486 break;
1487
1488 case AARCH64_OPND_CLASS_COND:
1489 if (type == AARCH64_OPND_COND1
1490 && (opnds[idx].cond->value & 0xe) == 0xe)
1491 {
1492 /* Not allow AL or NV. */
1493 set_syntax_error (mismatch_detail, idx, NULL);
1494 }
1495 break;
1496
1497 case AARCH64_OPND_CLASS_ADDRESS:
1498 /* Check writeback. */
1499 switch (opcode->iclass)
1500 {
1501 case ldst_pos:
1502 case ldst_unscaled:
1503 case ldstnapair_offs:
1504 case ldstpair_off:
1505 case ldst_unpriv:
1506 if (opnd->addr.writeback == 1)
1507 {
1508 set_syntax_error (mismatch_detail, idx,
1509 _("unexpected address writeback"));
1510 return 0;
1511 }
1512 break;
1513 case ldst_imm9:
1514 case ldstpair_indexed:
1515 case asisdlsep:
1516 case asisdlsop:
1517 if (opnd->addr.writeback == 0)
1518 {
1519 set_syntax_error (mismatch_detail, idx,
1520 _("address writeback expected"));
1521 return 0;
1522 }
1523 break;
1524 default:
1525 assert (opnd->addr.writeback == 0);
1526 break;
1527 }
1528 switch (type)
1529 {
1530 case AARCH64_OPND_ADDR_SIMM7:
1531 /* Scaled signed 7 bits immediate offset. */
1532 /* Get the size of the data element that is accessed, which may be
1533 different from that of the source register size,
1534 e.g. in strb/ldrb. */
1535 size = aarch64_get_qualifier_esize (opnd->qualifier);
1536 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1537 {
1538 set_offset_out_of_range_error (mismatch_detail, idx,
1539 -64 * size, 63 * size);
1540 return 0;
1541 }
1542 if (!value_aligned_p (opnd->addr.offset.imm, size))
1543 {
1544 set_unaligned_error (mismatch_detail, idx, size);
1545 return 0;
1546 }
1547 break;
1548 case AARCH64_OPND_ADDR_SIMM9:
1549 /* Unscaled signed 9 bits immediate offset. */
1550 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1551 {
1552 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1553 return 0;
1554 }
1555 break;
1556
1557 case AARCH64_OPND_ADDR_SIMM9_2:
1558 /* Unscaled signed 9 bits immediate offset, which has to be negative
1559 or unaligned. */
1560 size = aarch64_get_qualifier_esize (qualifier);
1561 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1562 && !value_aligned_p (opnd->addr.offset.imm, size))
1563 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1564 return 1;
1565 set_other_error (mismatch_detail, idx,
1566 _("negative or unaligned offset expected"));
1567 return 0;
1568
1569 case AARCH64_OPND_SIMD_ADDR_POST:
1570 /* AdvSIMD load/store multiple structures, post-index. */
1571 assert (idx == 1);
1572 if (opnd->addr.offset.is_reg)
1573 {
1574 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1575 return 1;
1576 else
1577 {
1578 set_other_error (mismatch_detail, idx,
1579 _("invalid register offset"));
1580 return 0;
1581 }
1582 }
1583 else
1584 {
1585 const aarch64_opnd_info *prev = &opnds[idx-1];
1586 unsigned num_bytes; /* total number of bytes transferred. */
1587 /* The opcode dependent area stores the number of elements in
1588 each structure to be loaded/stored. */
1589 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1590 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1591 /* Special handling of loading single structure to all lane. */
1592 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1593 * aarch64_get_qualifier_esize (prev->qualifier);
1594 else
1595 num_bytes = prev->reglist.num_regs
1596 * aarch64_get_qualifier_esize (prev->qualifier)
1597 * aarch64_get_qualifier_nelem (prev->qualifier);
1598 if ((int) num_bytes != opnd->addr.offset.imm)
1599 {
1600 set_other_error (mismatch_detail, idx,
1601 _("invalid post-increment amount"));
1602 return 0;
1603 }
1604 }
1605 break;
1606
1607 case AARCH64_OPND_ADDR_REGOFF:
1608 /* Get the size of the data element that is accessed, which may be
1609 different from that of the source register size,
1610 e.g. in strb/ldrb. */
1611 size = aarch64_get_qualifier_esize (opnd->qualifier);
1612 /* It is either no shift or shift by the binary logarithm of SIZE. */
1613 if (opnd->shifter.amount != 0
1614 && opnd->shifter.amount != (int)get_logsz (size))
1615 {
1616 set_other_error (mismatch_detail, idx,
1617 _("invalid shift amount"));
1618 return 0;
1619 }
1620 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1621 operators. */
1622 switch (opnd->shifter.kind)
1623 {
1624 case AARCH64_MOD_UXTW:
1625 case AARCH64_MOD_LSL:
1626 case AARCH64_MOD_SXTW:
1627 case AARCH64_MOD_SXTX: break;
1628 default:
1629 set_other_error (mismatch_detail, idx,
1630 _("invalid extend/shift operator"));
1631 return 0;
1632 }
1633 break;
1634
1635 case AARCH64_OPND_ADDR_UIMM12:
1636 imm = opnd->addr.offset.imm;
1637 /* Get the size of the data element that is accessed, which may be
1638 different from that of the source register size,
1639 e.g. in strb/ldrb. */
1640 size = aarch64_get_qualifier_esize (qualifier);
1641 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1642 {
1643 set_offset_out_of_range_error (mismatch_detail, idx,
1644 0, 4095 * size);
1645 return 0;
1646 }
1647 if (!value_aligned_p (opnd->addr.offset.imm, size))
1648 {
1649 set_unaligned_error (mismatch_detail, idx, size);
1650 return 0;
1651 }
1652 break;
1653
1654 case AARCH64_OPND_ADDR_PCREL14:
1655 case AARCH64_OPND_ADDR_PCREL19:
1656 case AARCH64_OPND_ADDR_PCREL21:
1657 case AARCH64_OPND_ADDR_PCREL26:
1658 imm = opnd->imm.value;
1659 if (operand_need_shift_by_two (get_operand_from_code (type)))
1660 {
1661 /* The offset value in a PC-relative branch instruction is alway
1662 4-byte aligned and is encoded without the lowest 2 bits. */
1663 if (!value_aligned_p (imm, 4))
1664 {
1665 set_unaligned_error (mismatch_detail, idx, 4);
1666 return 0;
1667 }
1668 /* Right shift by 2 so that we can carry out the following check
1669 canonically. */
1670 imm >>= 2;
1671 }
1672 size = get_operand_fields_width (get_operand_from_code (type));
1673 if (!value_fit_signed_field_p (imm, size))
1674 {
1675 set_other_error (mismatch_detail, idx,
1676 _("immediate out of range"));
1677 return 0;
1678 }
1679 break;
1680
1681 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1682 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1683 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1684 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1685 min_value = -8;
1686 max_value = 7;
1687 sve_imm_offset_vl:
1688 assert (!opnd->addr.offset.is_reg);
1689 assert (opnd->addr.preind);
1690 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1691 min_value *= num;
1692 max_value *= num;
1693 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1694 || (opnd->shifter.operator_present
1695 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1696 {
1697 set_other_error (mismatch_detail, idx,
1698 _("invalid addressing mode"));
1699 return 0;
1700 }
1701 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1702 {
1703 set_offset_out_of_range_error (mismatch_detail, idx,
1704 min_value, max_value);
1705 return 0;
1706 }
1707 if (!value_aligned_p (opnd->addr.offset.imm, num))
1708 {
1709 set_unaligned_error (mismatch_detail, idx, num);
1710 return 0;
1711 }
1712 break;
1713
1714 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1715 min_value = -32;
1716 max_value = 31;
1717 goto sve_imm_offset_vl;
1718
1719 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1720 min_value = -256;
1721 max_value = 255;
1722 goto sve_imm_offset_vl;
1723
1724 case AARCH64_OPND_SVE_ADDR_RI_U6:
1725 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1726 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1727 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1728 min_value = 0;
1729 max_value = 63;
1730 sve_imm_offset:
1731 assert (!opnd->addr.offset.is_reg);
1732 assert (opnd->addr.preind);
1733 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1734 min_value *= num;
1735 max_value *= num;
1736 if (opnd->shifter.operator_present
1737 || opnd->shifter.amount_present)
1738 {
1739 set_other_error (mismatch_detail, idx,
1740 _("invalid addressing mode"));
1741 return 0;
1742 }
1743 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1744 {
1745 set_offset_out_of_range_error (mismatch_detail, idx,
1746 min_value, max_value);
1747 return 0;
1748 }
1749 if (!value_aligned_p (opnd->addr.offset.imm, num))
1750 {
1751 set_unaligned_error (mismatch_detail, idx, num);
1752 return 0;
1753 }
1754 break;
1755
1756 case AARCH64_OPND_SVE_ADDR_RR:
1757 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1758 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1759 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1760 case AARCH64_OPND_SVE_ADDR_RX:
1761 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1762 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1763 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1764 case AARCH64_OPND_SVE_ADDR_RZ:
1765 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1766 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1767 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1768 modifiers = 1 << AARCH64_MOD_LSL;
1769 sve_rr_operand:
1770 assert (opnd->addr.offset.is_reg);
1771 assert (opnd->addr.preind);
1772 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1773 && opnd->addr.offset.regno == 31)
1774 {
1775 set_other_error (mismatch_detail, idx,
1776 _("index register xzr is not allowed"));
1777 return 0;
1778 }
1779 if (((1 << opnd->shifter.kind) & modifiers) == 0
1780 || (opnd->shifter.amount
1781 != get_operand_specific_data (&aarch64_operands[type])))
1782 {
1783 set_other_error (mismatch_detail, idx,
1784 _("invalid addressing mode"));
1785 return 0;
1786 }
1787 break;
1788
1789 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1790 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1791 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1792 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1793 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1794 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1795 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1796 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1797 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1798 goto sve_rr_operand;
1799
1800 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1801 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1802 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1803 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1804 min_value = 0;
1805 max_value = 31;
1806 goto sve_imm_offset;
1807
1808 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1809 modifiers = 1 << AARCH64_MOD_LSL;
1810 sve_zz_operand:
1811 assert (opnd->addr.offset.is_reg);
1812 assert (opnd->addr.preind);
1813 if (((1 << opnd->shifter.kind) & modifiers) == 0
1814 || opnd->shifter.amount < 0
1815 || opnd->shifter.amount > 3)
1816 {
1817 set_other_error (mismatch_detail, idx,
1818 _("invalid addressing mode"));
1819 return 0;
1820 }
1821 break;
1822
1823 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1824 modifiers = (1 << AARCH64_MOD_SXTW);
1825 goto sve_zz_operand;
1826
1827 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1828 modifiers = 1 << AARCH64_MOD_UXTW;
1829 goto sve_zz_operand;
1830
1831 default:
1832 break;
1833 }
1834 break;
1835
1836 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1837 if (type == AARCH64_OPND_LEt)
1838 {
1839 /* Get the upper bound for the element index. */
1840 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1841 if (!value_in_range_p (opnd->reglist.index, 0, num))
1842 {
1843 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1844 return 0;
1845 }
1846 }
1847 /* The opcode dependent area stores the number of elements in
1848 each structure to be loaded/stored. */
1849 num = get_opcode_dependent_value (opcode);
1850 switch (type)
1851 {
1852 case AARCH64_OPND_LVt:
1853 assert (num >= 1 && num <= 4);
1854 /* Unless LD1/ST1, the number of registers should be equal to that
1855 of the structure elements. */
1856 if (num != 1 && opnd->reglist.num_regs != num)
1857 {
1858 set_reg_list_error (mismatch_detail, idx, num);
1859 return 0;
1860 }
1861 break;
1862 case AARCH64_OPND_LVt_AL:
1863 case AARCH64_OPND_LEt:
1864 assert (num >= 1 && num <= 4);
1865 /* The number of registers should be equal to that of the structure
1866 elements. */
1867 if (opnd->reglist.num_regs != num)
1868 {
1869 set_reg_list_error (mismatch_detail, idx, num);
1870 return 0;
1871 }
1872 break;
1873 default:
1874 break;
1875 }
1876 break;
1877
1878 case AARCH64_OPND_CLASS_IMMEDIATE:
1879 /* Constraint check on immediate operand. */
1880 imm = opnd->imm.value;
1881 /* E.g. imm_0_31 constrains value to be 0..31. */
1882 if (qualifier_value_in_range_constraint_p (qualifier)
1883 && !value_in_range_p (imm, get_lower_bound (qualifier),
1884 get_upper_bound (qualifier)))
1885 {
1886 set_imm_out_of_range_error (mismatch_detail, idx,
1887 get_lower_bound (qualifier),
1888 get_upper_bound (qualifier));
1889 return 0;
1890 }
1891
1892 switch (type)
1893 {
1894 case AARCH64_OPND_AIMM:
1895 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1896 {
1897 set_other_error (mismatch_detail, idx,
1898 _("invalid shift operator"));
1899 return 0;
1900 }
1901 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1902 {
1903 set_other_error (mismatch_detail, idx,
1904 _("shift amount expected to be 0 or 12"));
1905 return 0;
1906 }
1907 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1908 {
1909 set_other_error (mismatch_detail, idx,
1910 _("immediate out of range"));
1911 return 0;
1912 }
1913 break;
1914
1915 case AARCH64_OPND_HALF:
1916 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1917 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1918 {
1919 set_other_error (mismatch_detail, idx,
1920 _("invalid shift operator"));
1921 return 0;
1922 }
1923 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1924 if (!value_aligned_p (opnd->shifter.amount, 16))
1925 {
1926 set_other_error (mismatch_detail, idx,
1927 _("shift amount should be a multiple of 16"));
1928 return 0;
1929 }
1930 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1931 {
1932 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1933 0, size * 8 - 16);
1934 return 0;
1935 }
1936 if (opnd->imm.value < 0)
1937 {
1938 set_other_error (mismatch_detail, idx,
1939 _("negative immediate value not allowed"));
1940 return 0;
1941 }
1942 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1943 {
1944 set_other_error (mismatch_detail, idx,
1945 _("immediate out of range"));
1946 return 0;
1947 }
1948 break;
1949
1950 case AARCH64_OPND_IMM_MOV:
1951 {
1952 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1953 imm = opnd->imm.value;
1954 assert (idx == 1);
1955 switch (opcode->op)
1956 {
1957 case OP_MOV_IMM_WIDEN:
1958 imm = ~imm;
1959 /* Fall through... */
1960 case OP_MOV_IMM_WIDE:
1961 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1962 {
1963 set_other_error (mismatch_detail, idx,
1964 _("immediate out of range"));
1965 return 0;
1966 }
1967 break;
1968 case OP_MOV_IMM_LOG:
1969 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1970 {
1971 set_other_error (mismatch_detail, idx,
1972 _("immediate out of range"));
1973 return 0;
1974 }
1975 break;
1976 default:
1977 assert (0);
1978 return 0;
1979 }
1980 }
1981 break;
1982
1983 case AARCH64_OPND_NZCV:
1984 case AARCH64_OPND_CCMP_IMM:
1985 case AARCH64_OPND_EXCEPTION:
1986 case AARCH64_OPND_UIMM4:
1987 case AARCH64_OPND_UIMM7:
1988 case AARCH64_OPND_UIMM3_OP1:
1989 case AARCH64_OPND_UIMM3_OP2:
1990 case AARCH64_OPND_SVE_UIMM3:
1991 case AARCH64_OPND_SVE_UIMM7:
1992 case AARCH64_OPND_SVE_UIMM8:
1993 case AARCH64_OPND_SVE_UIMM8_53:
1994 size = get_operand_fields_width (get_operand_from_code (type));
1995 assert (size < 32);
1996 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1997 {
1998 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1999 (1 << size) - 1);
2000 return 0;
2001 }
2002 break;
2003
2004 case AARCH64_OPND_SIMM5:
2005 case AARCH64_OPND_SVE_SIMM5:
2006 case AARCH64_OPND_SVE_SIMM5B:
2007 case AARCH64_OPND_SVE_SIMM6:
2008 case AARCH64_OPND_SVE_SIMM8:
2009 size = get_operand_fields_width (get_operand_from_code (type));
2010 assert (size < 32);
2011 if (!value_fit_signed_field_p (opnd->imm.value, size))
2012 {
2013 set_imm_out_of_range_error (mismatch_detail, idx,
2014 -(1 << (size - 1)),
2015 (1 << (size - 1)) - 1);
2016 return 0;
2017 }
2018 break;
2019
2020 case AARCH64_OPND_WIDTH:
2021 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2022 && opnds[0].type == AARCH64_OPND_Rd);
2023 size = get_upper_bound (qualifier);
2024 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2025 /* lsb+width <= reg.size */
2026 {
2027 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2028 size - opnds[idx-1].imm.value);
2029 return 0;
2030 }
2031 break;
2032
2033 case AARCH64_OPND_LIMM:
2034 case AARCH64_OPND_SVE_LIMM:
2035 {
2036 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2037 uint64_t uimm = opnd->imm.value;
2038 if (opcode->op == OP_BIC)
2039 uimm = ~uimm;
2040 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2041 {
2042 set_other_error (mismatch_detail, idx,
2043 _("immediate out of range"));
2044 return 0;
2045 }
2046 }
2047 break;
2048
2049 case AARCH64_OPND_IMM0:
2050 case AARCH64_OPND_FPIMM0:
2051 if (opnd->imm.value != 0)
2052 {
2053 set_other_error (mismatch_detail, idx,
2054 _("immediate zero expected"));
2055 return 0;
2056 }
2057 break;
2058
2059 case AARCH64_OPND_SHLL_IMM:
2060 assert (idx == 2);
2061 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2062 if (opnd->imm.value != size)
2063 {
2064 set_other_error (mismatch_detail, idx,
2065 _("invalid shift amount"));
2066 return 0;
2067 }
2068 break;
2069
2070 case AARCH64_OPND_IMM_VLSL:
2071 size = aarch64_get_qualifier_esize (qualifier);
2072 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2073 {
2074 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2075 size * 8 - 1);
2076 return 0;
2077 }
2078 break;
2079
2080 case AARCH64_OPND_IMM_VLSR:
2081 size = aarch64_get_qualifier_esize (qualifier);
2082 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2083 {
2084 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2085 return 0;
2086 }
2087 break;
2088
2089 case AARCH64_OPND_SIMD_IMM:
2090 case AARCH64_OPND_SIMD_IMM_SFT:
2091 /* Qualifier check. */
2092 switch (qualifier)
2093 {
2094 case AARCH64_OPND_QLF_LSL:
2095 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2096 {
2097 set_other_error (mismatch_detail, idx,
2098 _("invalid shift operator"));
2099 return 0;
2100 }
2101 break;
2102 case AARCH64_OPND_QLF_MSL:
2103 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2104 {
2105 set_other_error (mismatch_detail, idx,
2106 _("invalid shift operator"));
2107 return 0;
2108 }
2109 break;
2110 case AARCH64_OPND_QLF_NIL:
2111 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2112 {
2113 set_other_error (mismatch_detail, idx,
2114 _("shift is not permitted"));
2115 return 0;
2116 }
2117 break;
2118 default:
2119 assert (0);
2120 return 0;
2121 }
2122 /* Is the immediate valid? */
2123 assert (idx == 1);
2124 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2125 {
2126 /* uimm8 or simm8 */
2127 if (!value_in_range_p (opnd->imm.value, -128, 255))
2128 {
2129 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2130 return 0;
2131 }
2132 }
2133 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2134 {
2135 /* uimm64 is not
2136 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2137 ffffffffgggggggghhhhhhhh'. */
2138 set_other_error (mismatch_detail, idx,
2139 _("invalid value for immediate"));
2140 return 0;
2141 }
2142 /* Is the shift amount valid? */
2143 switch (opnd->shifter.kind)
2144 {
2145 case AARCH64_MOD_LSL:
2146 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2147 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2148 {
2149 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2150 (size - 1) * 8);
2151 return 0;
2152 }
2153 if (!value_aligned_p (opnd->shifter.amount, 8))
2154 {
2155 set_unaligned_error (mismatch_detail, idx, 8);
2156 return 0;
2157 }
2158 break;
2159 case AARCH64_MOD_MSL:
2160 /* Only 8 and 16 are valid shift amount. */
2161 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2162 {
2163 set_other_error (mismatch_detail, idx,
2164 _("shift amount expected to be 0 or 16"));
2165 return 0;
2166 }
2167 break;
2168 default:
2169 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2170 {
2171 set_other_error (mismatch_detail, idx,
2172 _("invalid shift operator"));
2173 return 0;
2174 }
2175 break;
2176 }
2177 break;
2178
2179 case AARCH64_OPND_FPIMM:
2180 case AARCH64_OPND_SIMD_FPIMM:
2181 if (opnd->imm.is_fp == 0)
2182 {
2183 set_other_error (mismatch_detail, idx,
2184 _("floating-point immediate expected"));
2185 return 0;
2186 }
2187 /* The value is expected to be an 8-bit floating-point constant with
2188 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2189 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2190 instruction). */
2191 if (!value_in_range_p (opnd->imm.value, 0, 255))
2192 {
2193 set_other_error (mismatch_detail, idx,
2194 _("immediate out of range"));
2195 return 0;
2196 }
2197 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2198 {
2199 set_other_error (mismatch_detail, idx,
2200 _("invalid shift operator"));
2201 return 0;
2202 }
2203 break;
2204
2205 case AARCH64_OPND_SVE_AIMM:
2206 min_value = 0;
2207 sve_aimm:
2208 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2209 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2210 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2211 uvalue = opnd->imm.value;
2212 shift = opnd->shifter.amount;
2213 if (size == 1)
2214 {
2215 if (shift != 0)
2216 {
2217 set_other_error (mismatch_detail, idx,
2218 _("no shift amount allowed for"
2219 " 8-bit constants"));
2220 return 0;
2221 }
2222 }
2223 else
2224 {
2225 if (shift != 0 && shift != 8)
2226 {
2227 set_other_error (mismatch_detail, idx,
2228 _("shift amount must be 0 or 8"));
2229 return 0;
2230 }
2231 if (shift == 0 && (uvalue & 0xff) == 0)
2232 {
2233 shift = 8;
2234 uvalue = (int64_t) uvalue / 256;
2235 }
2236 }
2237 mask >>= shift;
2238 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2239 {
2240 set_other_error (mismatch_detail, idx,
2241 _("immediate too big for element size"));
2242 return 0;
2243 }
2244 uvalue = (uvalue - min_value) & mask;
2245 if (uvalue > 0xff)
2246 {
2247 set_other_error (mismatch_detail, idx,
2248 _("invalid arithmetic immediate"));
2249 return 0;
2250 }
2251 break;
2252
2253 case AARCH64_OPND_SVE_ASIMM:
2254 min_value = -128;
2255 goto sve_aimm;
2256
2257 case AARCH64_OPND_SVE_INV_LIMM:
2258 {
2259 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2260 uint64_t uimm = ~opnd->imm.value;
2261 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2262 {
2263 set_other_error (mismatch_detail, idx,
2264 _("immediate out of range"));
2265 return 0;
2266 }
2267 }
2268 break;
2269
2270 case AARCH64_OPND_SVE_LIMM_MOV:
2271 {
2272 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2273 uint64_t uimm = opnd->imm.value;
2274 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2275 {
2276 set_other_error (mismatch_detail, idx,
2277 _("immediate out of range"));
2278 return 0;
2279 }
2280 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2281 {
2282 set_other_error (mismatch_detail, idx,
2283 _("invalid replicated MOV immediate"));
2284 return 0;
2285 }
2286 }
2287 break;
2288
2289 case AARCH64_OPND_SVE_PATTERN_SCALED:
2290 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2291 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2292 {
2293 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2294 return 0;
2295 }
2296 break;
2297
2298 case AARCH64_OPND_SVE_SHLIMM_PRED:
2299 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2300 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2301 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2302 {
2303 set_imm_out_of_range_error (mismatch_detail, idx,
2304 0, 8 * size - 1);
2305 return 0;
2306 }
2307 break;
2308
2309 case AARCH64_OPND_SVE_SHRIMM_PRED:
2310 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2311 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2312 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2313 {
2314 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2315 return 0;
2316 }
2317 break;
2318
2319 default:
2320 break;
2321 }
2322 break;
2323
2324 case AARCH64_OPND_CLASS_CP_REG:
2325 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2326 valid range: C0 - C15. */
2327 if (opnd->reg.regno > 15)
2328 {
2329 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2330 return 0;
2331 }
2332 break;
2333
2334 case AARCH64_OPND_CLASS_SYSTEM:
2335 switch (type)
2336 {
2337 case AARCH64_OPND_PSTATEFIELD:
2338 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2339 /* MSR UAO, #uimm4
2340 MSR PAN, #uimm4
2341 The immediate must be #0 or #1. */
2342 if ((opnd->pstatefield == 0x03 /* UAO. */
2343 || opnd->pstatefield == 0x04) /* PAN. */
2344 && opnds[1].imm.value > 1)
2345 {
2346 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2347 return 0;
2348 }
2349 /* MSR SPSel, #uimm4
2350 Uses uimm4 as a control value to select the stack pointer: if
2351 bit 0 is set it selects the current exception level's stack
2352 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2353 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2354 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2355 {
2356 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2357 return 0;
2358 }
2359 break;
2360 default:
2361 break;
2362 }
2363 break;
2364
2365 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2366 /* Get the upper bound for the element index. */
2367 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2368 /* Index out-of-range. */
2369 if (!value_in_range_p (opnd->reglane.index, 0, num))
2370 {
2371 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2372 return 0;
2373 }
2374 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2375 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2376 number is encoded in "size:M:Rm":
2377 size <Vm>
2378 00 RESERVED
2379 01 0:Rm
2380 10 M:Rm
2381 11 RESERVED */
2382 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2383 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2384 {
2385 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2386 return 0;
2387 }
2388 break;
2389
2390 case AARCH64_OPND_CLASS_MODIFIED_REG:
2391 assert (idx == 1 || idx == 2);
2392 switch (type)
2393 {
2394 case AARCH64_OPND_Rm_EXT:
2395 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2396 && opnd->shifter.kind != AARCH64_MOD_LSL)
2397 {
2398 set_other_error (mismatch_detail, idx,
2399 _("extend operator expected"));
2400 return 0;
2401 }
2402 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2403 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2404 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2405 case. */
2406 if (!aarch64_stack_pointer_p (opnds + 0)
2407 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2408 {
2409 if (!opnd->shifter.operator_present)
2410 {
2411 set_other_error (mismatch_detail, idx,
2412 _("missing extend operator"));
2413 return 0;
2414 }
2415 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2416 {
2417 set_other_error (mismatch_detail, idx,
2418 _("'LSL' operator not allowed"));
2419 return 0;
2420 }
2421 }
2422 assert (opnd->shifter.operator_present /* Default to LSL. */
2423 || opnd->shifter.kind == AARCH64_MOD_LSL);
2424 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2425 {
2426 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2427 return 0;
2428 }
2429 /* In the 64-bit form, the final register operand is written as Wm
2430 for all but the (possibly omitted) UXTX/LSL and SXTX
2431 operators.
2432 N.B. GAS allows X register to be used with any operator as a
2433 programming convenience. */
2434 if (qualifier == AARCH64_OPND_QLF_X
2435 && opnd->shifter.kind != AARCH64_MOD_LSL
2436 && opnd->shifter.kind != AARCH64_MOD_UXTX
2437 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2438 {
2439 set_other_error (mismatch_detail, idx, _("W register expected"));
2440 return 0;
2441 }
2442 break;
2443
2444 case AARCH64_OPND_Rm_SFT:
2445 /* ROR is not available to the shifted register operand in
2446 arithmetic instructions. */
2447 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2448 {
2449 set_other_error (mismatch_detail, idx,
2450 _("shift operator expected"));
2451 return 0;
2452 }
2453 if (opnd->shifter.kind == AARCH64_MOD_ROR
2454 && opcode->iclass != log_shift)
2455 {
2456 set_other_error (mismatch_detail, idx,
2457 _("'ROR' operator not allowed"));
2458 return 0;
2459 }
2460 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2461 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2462 {
2463 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2464 return 0;
2465 }
2466 break;
2467
2468 default:
2469 break;
2470 }
2471 break;
2472
2473 default:
2474 break;
2475 }
2476
2477 return 1;
2478 }
2479
2480 /* Main entrypoint for the operand constraint checking.
2481
2482 Return 1 if operands of *INST meet the constraint applied by the operand
2483 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2484 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2485 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2486 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2487 error kind when it is notified that an instruction does not pass the check).
2488
2489 Un-determined operand qualifiers may get established during the process. */
2490
2491 int
2492 aarch64_match_operands_constraint (aarch64_inst *inst,
2493 aarch64_operand_error *mismatch_detail)
2494 {
2495 int i;
2496
2497 DEBUG_TRACE ("enter");
2498
2499 /* Check for cases where a source register needs to be the same as the
2500 destination register. Do this before matching qualifiers since if
2501 an instruction has both invalid tying and invalid qualifiers,
2502 the error about qualifiers would suggest several alternative
2503 instructions that also have invalid tying. */
2504 i = inst->opcode->tied_operand;
2505 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2506 {
2507 if (mismatch_detail)
2508 {
2509 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2510 mismatch_detail->index = i;
2511 mismatch_detail->error = NULL;
2512 }
2513 return 0;
2514 }
2515
2516 /* Match operands' qualifier.
2517 *INST has already had qualifier establish for some, if not all, of
2518 its operands; we need to find out whether these established
2519 qualifiers match one of the qualifier sequence in
2520 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2521 with the corresponding qualifier in such a sequence.
2522 Only basic operand constraint checking is done here; the more thorough
2523 constraint checking will carried out by operand_general_constraint_met_p,
2524 which has be to called after this in order to get all of the operands'
2525 qualifiers established. */
2526 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2527 {
2528 DEBUG_TRACE ("FAIL on operand qualifier matching");
2529 if (mismatch_detail)
2530 {
2531 /* Return an error type to indicate that it is the qualifier
2532 matching failure; we don't care about which operand as there
2533 are enough information in the opcode table to reproduce it. */
2534 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2535 mismatch_detail->index = -1;
2536 mismatch_detail->error = NULL;
2537 }
2538 return 0;
2539 }
2540
2541 /* Match operands' constraint. */
2542 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2543 {
2544 enum aarch64_opnd type = inst->opcode->operands[i];
2545 if (type == AARCH64_OPND_NIL)
2546 break;
2547 if (inst->operands[i].skip)
2548 {
2549 DEBUG_TRACE ("skip the incomplete operand %d", i);
2550 continue;
2551 }
2552 if (operand_general_constraint_met_p (inst->operands, i, type,
2553 inst->opcode, mismatch_detail) == 0)
2554 {
2555 DEBUG_TRACE ("FAIL on operand %d", i);
2556 return 0;
2557 }
2558 }
2559
2560 DEBUG_TRACE ("PASS");
2561
2562 return 1;
2563 }
2564
2565 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2566 Also updates the TYPE of each INST->OPERANDS with the corresponding
2567 value of OPCODE->OPERANDS.
2568
2569 Note that some operand qualifiers may need to be manually cleared by
2570 the caller before it further calls the aarch64_opcode_encode; by
2571 doing this, it helps the qualifier matching facilities work
2572 properly. */
2573
2574 const aarch64_opcode*
2575 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2576 {
2577 int i;
2578 const aarch64_opcode *old = inst->opcode;
2579
2580 inst->opcode = opcode;
2581
2582 /* Update the operand types. */
2583 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2584 {
2585 inst->operands[i].type = opcode->operands[i];
2586 if (opcode->operands[i] == AARCH64_OPND_NIL)
2587 break;
2588 }
2589
2590 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2591
2592 return old;
2593 }
2594
2595 int
2596 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2597 {
2598 int i;
2599 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2600 if (operands[i] == operand)
2601 return i;
2602 else if (operands[i] == AARCH64_OPND_NIL)
2603 break;
2604 return -1;
2605 }
2606 \f
/* R0...R30, followed by FOR31.  Expands an initializer for one bank of
   32 register names, where R names registers 0-30 and FOR31 supplies
   the special name of register 31 (SP/ZR variant).  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* General-purpose register names, indexed as:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Register 31 is an ordinary vector register
   here, so BANK's FOR31 argument is simply Z31 itself.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2637
2638 /* Return the integer register name.
2639 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2640
2641 static inline const char *
2642 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2643 {
2644 const int has_zr = sp_reg_p ? 0 : 1;
2645 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2646 return int_reg[has_zr][is_64][regno];
2647 }
2648
2649 /* Like get_int_reg_name, but IS_64 is always 1. */
2650
2651 static inline const char *
2652 get_64bit_int_reg_name (int regno, int sp_reg_p)
2653 {
2654 const int has_zr = sp_reg_p ? 0 : 1;
2655 return int_reg[has_zr][1][regno];
2656 }
2657
2658 /* Get the name of the integer offset register in OPND, using the shift type
2659 to decide whether it's a word or doubleword. */
2660
2661 static inline const char *
2662 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2663 {
2664 switch (opnd->shifter.kind)
2665 {
2666 case AARCH64_MOD_UXTW:
2667 case AARCH64_MOD_SXTW:
2668 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2669
2670 case AARCH64_MOD_LSL:
2671 case AARCH64_MOD_SXTX:
2672 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2673
2674 default:
2675 abort ();
2676 }
2677 }
2678
2679 /* Get the name of the SVE vector offset register in OPND, using the operand
2680 qualifier to decide whether the suffix should be .S or .D. */
2681
2682 static inline const char *
2683 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2684 {
2685 assert (qualifier == AARCH64_OPND_QLF_S_S
2686 || qualifier == AARCH64_OPND_QLF_S_D);
2687 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2688 }
2689
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows the integer bit pattern produced by expand_fp_imm to
   be reinterpreted as the corresponding floating-point type.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to a single-precision bit
   pattern (see expand_fp_imm), hence the 32-bit payload here too.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2709
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize IMM so that an unsupported SIZE cannot yield an
     uninitialized return value when asserts are compiled out (NDEBUG).  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the high word first, then shift into the top half.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2753
2754 /* Produce the string representation of the register list operand *OPND
2755 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2756 the register name that comes before the register number, such as "v". */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  /* Only the LEt operand type is required to carry an element index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.
     LAST_REG > FIRST_REG also excludes lists that wrap past
     register 31, which must use the comma-separated form.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Comma-separated form: spell out each register, modulo 32.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
2811
2812 /* Print the register+immediate address in OPND to BUF, which has SIZE
2813 characters. BASE is the name of the base register. */
2814
2815 static void
2816 print_immediate_offset_address (char *buf, size_t size,
2817 const aarch64_opnd_info *opnd,
2818 const char *base)
2819 {
2820 if (opnd->addr.writeback)
2821 {
2822 if (opnd->addr.preind)
2823 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2824 else
2825 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2826 }
2827 else
2828 {
2829 if (opnd->shifter.operator_present)
2830 {
2831 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2832 snprintf (buf, size, "[%s,#%d,mul vl]",
2833 base, opnd->addr.offset.imm);
2834 }
2835 else if (opnd->addr.offset.imm)
2836 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2837 else
2838 snprintf (buf, size, "[%s]", base);
2839 }
2840 }
2841
2842 /* Produce the string representation of the register offset address operand
2843 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2844 the names of the base and offset registers. */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset)
{
  char tb[16];			/* Temporary buffer.  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Don't print the shift/extend amount when the amount is zero and
	 when it is not the special case of 8-bit load/store instruction
	 (the S_B qualifier with an explicitly present amount).  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift suffix: ",<ext> #<amount>", ",<ext>",
     or nothing at all.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
		  opnd->shifter.amount);
      else
	snprintf (tb, sizeof (tb), ",%s", shift_name);
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
}
2881
2882 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2883 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2884 PC, PCREL_P and ADDRESS are used to pass in and return information about
2885 the PC-relative address calculation, where the PC value is passed in
2886 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2887 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2888 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2889
2890 The function serves both the disassembler and the assembler diagnostics
2891 issuer, which is the reason why it lives in this file. */
2892
2893 void
2894 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2895 const aarch64_opcode *opcode,
2896 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2897 bfd_vma *address)
2898 {
2899 int i;
2900 const char *name = NULL;
2901 const aarch64_opnd_info *opnd = opnds + idx;
2902 enum aarch64_modifier_kind kind;
2903 uint64_t addr, enum_value;
2904
2905 buf[0] = '\0';
2906 if (pcrel_p)
2907 *pcrel_p = 0;
2908
2909 switch (opnd->type)
2910 {
2911 case AARCH64_OPND_Rd:
2912 case AARCH64_OPND_Rn:
2913 case AARCH64_OPND_Rm:
2914 case AARCH64_OPND_Rt:
2915 case AARCH64_OPND_Rt2:
2916 case AARCH64_OPND_Rs:
2917 case AARCH64_OPND_Ra:
2918 case AARCH64_OPND_Rt_SYS:
2919 case AARCH64_OPND_PAIRREG:
2920 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2921 the <ic_op>, therefore we we use opnd->present to override the
2922 generic optional-ness information. */
2923 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2924 break;
2925 /* Omit the operand, e.g. RET. */
2926 if (optional_operand_p (opcode, idx)
2927 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2928 break;
2929 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2930 || opnd->qualifier == AARCH64_OPND_QLF_X);
2931 snprintf (buf, size, "%s",
2932 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2933 break;
2934
2935 case AARCH64_OPND_Rd_SP:
2936 case AARCH64_OPND_Rn_SP:
2937 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2938 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2939 || opnd->qualifier == AARCH64_OPND_QLF_X
2940 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2941 snprintf (buf, size, "%s",
2942 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2943 break;
2944
2945 case AARCH64_OPND_Rm_EXT:
2946 kind = opnd->shifter.kind;
2947 assert (idx == 1 || idx == 2);
2948 if ((aarch64_stack_pointer_p (opnds)
2949 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2950 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2951 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2952 && kind == AARCH64_MOD_UXTW)
2953 || (opnd->qualifier == AARCH64_OPND_QLF_X
2954 && kind == AARCH64_MOD_UXTX)))
2955 {
2956 /* 'LSL' is the preferred form in this case. */
2957 kind = AARCH64_MOD_LSL;
2958 if (opnd->shifter.amount == 0)
2959 {
2960 /* Shifter omitted. */
2961 snprintf (buf, size, "%s",
2962 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2963 break;
2964 }
2965 }
2966 if (opnd->shifter.amount)
2967 snprintf (buf, size, "%s, %s #%" PRIi64,
2968 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2969 aarch64_operand_modifiers[kind].name,
2970 opnd->shifter.amount);
2971 else
2972 snprintf (buf, size, "%s, %s",
2973 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2974 aarch64_operand_modifiers[kind].name);
2975 break;
2976
2977 case AARCH64_OPND_Rm_SFT:
2978 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2979 || opnd->qualifier == AARCH64_OPND_QLF_X);
2980 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2981 snprintf (buf, size, "%s",
2982 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2983 else
2984 snprintf (buf, size, "%s, %s #%" PRIi64,
2985 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2986 aarch64_operand_modifiers[opnd->shifter.kind].name,
2987 opnd->shifter.amount);
2988 break;
2989
2990 case AARCH64_OPND_Fd:
2991 case AARCH64_OPND_Fn:
2992 case AARCH64_OPND_Fm:
2993 case AARCH64_OPND_Fa:
2994 case AARCH64_OPND_Ft:
2995 case AARCH64_OPND_Ft2:
2996 case AARCH64_OPND_Sd:
2997 case AARCH64_OPND_Sn:
2998 case AARCH64_OPND_Sm:
2999 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3000 opnd->reg.regno);
3001 break;
3002
3003 case AARCH64_OPND_Vd:
3004 case AARCH64_OPND_Vn:
3005 case AARCH64_OPND_Vm:
3006 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3007 aarch64_get_qualifier_name (opnd->qualifier));
3008 break;
3009
3010 case AARCH64_OPND_Ed:
3011 case AARCH64_OPND_En:
3012 case AARCH64_OPND_Em:
3013 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3014 aarch64_get_qualifier_name (opnd->qualifier),
3015 opnd->reglane.index);
3016 break;
3017
3018 case AARCH64_OPND_VdD1:
3019 case AARCH64_OPND_VnD1:
3020 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3021 break;
3022
3023 case AARCH64_OPND_LVn:
3024 case AARCH64_OPND_LVt:
3025 case AARCH64_OPND_LVt_AL:
3026 case AARCH64_OPND_LEt:
3027 print_register_list (buf, size, opnd, "v");
3028 break;
3029
3030 case AARCH64_OPND_SVE_Pd:
3031 case AARCH64_OPND_SVE_Pg3:
3032 case AARCH64_OPND_SVE_Pg4_5:
3033 case AARCH64_OPND_SVE_Pg4_10:
3034 case AARCH64_OPND_SVE_Pg4_16:
3035 case AARCH64_OPND_SVE_Pm:
3036 case AARCH64_OPND_SVE_Pn:
3037 case AARCH64_OPND_SVE_Pt:
3038 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3039 snprintf (buf, size, "p%d", opnd->reg.regno);
3040 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3041 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3042 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3043 aarch64_get_qualifier_name (opnd->qualifier));
3044 else
3045 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3046 aarch64_get_qualifier_name (opnd->qualifier));
3047 break;
3048
3049 case AARCH64_OPND_SVE_Za_5:
3050 case AARCH64_OPND_SVE_Za_16:
3051 case AARCH64_OPND_SVE_Zd:
3052 case AARCH64_OPND_SVE_Zm_5:
3053 case AARCH64_OPND_SVE_Zm_16:
3054 case AARCH64_OPND_SVE_Zn:
3055 case AARCH64_OPND_SVE_Zt:
3056 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3057 snprintf (buf, size, "z%d", opnd->reg.regno);
3058 else
3059 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3060 aarch64_get_qualifier_name (opnd->qualifier));
3061 break;
3062
3063 case AARCH64_OPND_SVE_ZnxN:
3064 case AARCH64_OPND_SVE_ZtxN:
3065 print_register_list (buf, size, opnd, "z");
3066 break;
3067
3068 case AARCH64_OPND_SVE_Zn_INDEX:
3069 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3070 aarch64_get_qualifier_name (opnd->qualifier),
3071 opnd->reglane.index);
3072 break;
3073
3074 case AARCH64_OPND_Cn:
3075 case AARCH64_OPND_Cm:
3076 snprintf (buf, size, "C%d", opnd->reg.regno);
3077 break;
3078
3079 case AARCH64_OPND_IDX:
3080 case AARCH64_OPND_IMM:
3081 case AARCH64_OPND_WIDTH:
3082 case AARCH64_OPND_UIMM3_OP1:
3083 case AARCH64_OPND_UIMM3_OP2:
3084 case AARCH64_OPND_BIT_NUM:
3085 case AARCH64_OPND_IMM_VLSL:
3086 case AARCH64_OPND_IMM_VLSR:
3087 case AARCH64_OPND_SHLL_IMM:
3088 case AARCH64_OPND_IMM0:
3089 case AARCH64_OPND_IMMR:
3090 case AARCH64_OPND_IMMS:
3091 case AARCH64_OPND_FBITS:
3092 case AARCH64_OPND_SIMM5:
3093 case AARCH64_OPND_SVE_SHLIMM_PRED:
3094 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3095 case AARCH64_OPND_SVE_SHRIMM_PRED:
3096 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3097 case AARCH64_OPND_SVE_SIMM5:
3098 case AARCH64_OPND_SVE_SIMM5B:
3099 case AARCH64_OPND_SVE_SIMM6:
3100 case AARCH64_OPND_SVE_SIMM8:
3101 case AARCH64_OPND_SVE_UIMM3:
3102 case AARCH64_OPND_SVE_UIMM7:
3103 case AARCH64_OPND_SVE_UIMM8:
3104 case AARCH64_OPND_SVE_UIMM8_53:
3105 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3106 break;
3107
3108 case AARCH64_OPND_SVE_PATTERN:
3109 if (optional_operand_p (opcode, idx)
3110 && opnd->imm.value == get_optional_operand_default_value (opcode))
3111 break;
3112 enum_value = opnd->imm.value;
3113 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3114 if (aarch64_sve_pattern_array[enum_value])
3115 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3116 else
3117 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3118 break;
3119
3120 case AARCH64_OPND_SVE_PATTERN_SCALED:
3121 if (optional_operand_p (opcode, idx)
3122 && !opnd->shifter.operator_present
3123 && opnd->imm.value == get_optional_operand_default_value (opcode))
3124 break;
3125 enum_value = opnd->imm.value;
3126 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3127 if (aarch64_sve_pattern_array[opnd->imm.value])
3128 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3129 else
3130 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3131 if (opnd->shifter.operator_present)
3132 {
3133 size_t len = strlen (buf);
3134 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3135 aarch64_operand_modifiers[opnd->shifter.kind].name,
3136 opnd->shifter.amount);
3137 }
3138 break;
3139
3140 case AARCH64_OPND_SVE_PRFOP:
3141 enum_value = opnd->imm.value;
3142 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3143 if (aarch64_sve_prfop_array[enum_value])
3144 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3145 else
3146 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3147 break;
3148
3149 case AARCH64_OPND_IMM_MOV:
3150 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3151 {
3152 case 4: /* e.g. MOV Wd, #<imm32>. */
3153 {
3154 int imm32 = opnd->imm.value;
3155 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3156 }
3157 break;
3158 case 8: /* e.g. MOV Xd, #<imm64>. */
3159 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3160 opnd->imm.value, opnd->imm.value);
3161 break;
3162 default: assert (0);
3163 }
3164 break;
3165
3166 case AARCH64_OPND_FPIMM0:
3167 snprintf (buf, size, "#0.0");
3168 break;
3169
3170 case AARCH64_OPND_LIMM:
3171 case AARCH64_OPND_AIMM:
3172 case AARCH64_OPND_HALF:
3173 case AARCH64_OPND_SVE_INV_LIMM:
3174 case AARCH64_OPND_SVE_LIMM:
3175 case AARCH64_OPND_SVE_LIMM_MOV:
3176 if (opnd->shifter.amount)
3177 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3178 opnd->shifter.amount);
3179 else
3180 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3181 break;
3182
3183 case AARCH64_OPND_SIMD_IMM:
3184 case AARCH64_OPND_SIMD_IMM_SFT:
3185 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3186 || opnd->shifter.kind == AARCH64_MOD_NONE)
3187 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3188 else
3189 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3190 aarch64_operand_modifiers[opnd->shifter.kind].name,
3191 opnd->shifter.amount);
3192 break;
3193
3194 case AARCH64_OPND_SVE_AIMM:
3195 case AARCH64_OPND_SVE_ASIMM:
3196 if (opnd->shifter.amount)
3197 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3198 opnd->shifter.amount);
3199 else
3200 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3201 break;
3202
3203 case AARCH64_OPND_FPIMM:
3204 case AARCH64_OPND_SIMD_FPIMM:
3205 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3206 {
3207 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3208 {
3209 half_conv_t c;
3210 c.i = expand_fp_imm (2, opnd->imm.value);
3211 snprintf (buf, size, "#%.18e", c.f);
3212 }
3213 break;
3214 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3215 {
3216 single_conv_t c;
3217 c.i = expand_fp_imm (4, opnd->imm.value);
3218 snprintf (buf, size, "#%.18e", c.f);
3219 }
3220 break;
3221 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3222 {
3223 double_conv_t c;
3224 c.i = expand_fp_imm (8, opnd->imm.value);
3225 snprintf (buf, size, "#%.18e", c.d);
3226 }
3227 break;
3228 default: assert (0);
3229 }
3230 break;
3231
3232 case AARCH64_OPND_CCMP_IMM:
3233 case AARCH64_OPND_NZCV:
3234 case AARCH64_OPND_EXCEPTION:
3235 case AARCH64_OPND_UIMM4:
3236 case AARCH64_OPND_UIMM7:
3237 if (optional_operand_p (opcode, idx) == TRUE
3238 && (opnd->imm.value ==
3239 (int64_t) get_optional_operand_default_value (opcode)))
3240 /* Omit the operand, e.g. DCPS1. */
3241 break;
3242 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3243 break;
3244
3245 case AARCH64_OPND_COND:
3246 case AARCH64_OPND_COND1:
3247 snprintf (buf, size, "%s", opnd->cond->names[0]);
3248 break;
3249
3250 case AARCH64_OPND_ADDR_ADRP:
3251 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3252 + opnd->imm.value;
3253 if (pcrel_p)
3254 *pcrel_p = 1;
3255 if (address)
3256 *address = addr;
3257 /* This is not necessary during the disassembling, as print_address_func
3258 in the disassemble_info will take care of the printing. But some
3259 other callers may be still interested in getting the string in *STR,
3260 so here we do snprintf regardless. */
3261 snprintf (buf, size, "#0x%" PRIx64, addr);
3262 break;
3263
3264 case AARCH64_OPND_ADDR_PCREL14:
3265 case AARCH64_OPND_ADDR_PCREL19:
3266 case AARCH64_OPND_ADDR_PCREL21:
3267 case AARCH64_OPND_ADDR_PCREL26:
3268 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3269 if (pcrel_p)
3270 *pcrel_p = 1;
3271 if (address)
3272 *address = addr;
3273 /* This is not necessary during the disassembling, as print_address_func
3274 in the disassemble_info will take care of the printing. But some
3275 other callers may be still interested in getting the string in *STR,
3276 so here we do snprintf regardless. */
3277 snprintf (buf, size, "#0x%" PRIx64, addr);
3278 break;
3279
3280 case AARCH64_OPND_ADDR_SIMPLE:
3281 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3282 case AARCH64_OPND_SIMD_ADDR_POST:
3283 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3284 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3285 {
3286 if (opnd->addr.offset.is_reg)
3287 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3288 else
3289 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3290 }
3291 else
3292 snprintf (buf, size, "[%s]", name);
3293 break;
3294
3295 case AARCH64_OPND_ADDR_REGOFF:
3296 case AARCH64_OPND_SVE_ADDR_RR:
3297 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3298 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3299 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3300 case AARCH64_OPND_SVE_ADDR_RX:
3301 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3302 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3303 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3304 print_register_offset_address
3305 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3306 get_offset_int_reg_name (opnd));
3307 break;
3308
3309 case AARCH64_OPND_SVE_ADDR_RZ:
3310 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3311 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3312 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3313 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3314 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3315 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3316 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3317 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3318 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3319 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3320 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3321 print_register_offset_address
3322 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3323 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3324 break;
3325
3326 case AARCH64_OPND_ADDR_SIMM7:
3327 case AARCH64_OPND_ADDR_SIMM9:
3328 case AARCH64_OPND_ADDR_SIMM9_2:
3329 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3330 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3331 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3332 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3333 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3334 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3335 case AARCH64_OPND_SVE_ADDR_RI_U6:
3336 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3337 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3338 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3339 print_immediate_offset_address
3340 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3341 break;
3342
3343 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3344 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3345 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3346 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3347 print_immediate_offset_address
3348 (buf, size, opnd,
3349 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3350 break;
3351
3352 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3353 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3354 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3355 print_register_offset_address
3356 (buf, size, opnd,
3357 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3358 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3359 break;
3360
3361 case AARCH64_OPND_ADDR_UIMM12:
3362 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3363 if (opnd->addr.offset.imm)
3364 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3365 else
3366 snprintf (buf, size, "[%s]", name);
3367 break;
3368
3369 case AARCH64_OPND_SYSREG:
3370 for (i = 0; aarch64_sys_regs[i].name; ++i)
3371 if (aarch64_sys_regs[i].value == opnd->sysreg
3372 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3373 break;
3374 if (aarch64_sys_regs[i].name)
3375 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3376 else
3377 {
3378 /* Implementation defined system register. */
3379 unsigned int value = opnd->sysreg;
3380 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3381 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3382 value & 0x7);
3383 }
3384 break;
3385
3386 case AARCH64_OPND_PSTATEFIELD:
3387 for (i = 0; aarch64_pstatefields[i].name; ++i)
3388 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3389 break;
3390 assert (aarch64_pstatefields[i].name);
3391 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3392 break;
3393
3394 case AARCH64_OPND_SYSREG_AT:
3395 case AARCH64_OPND_SYSREG_DC:
3396 case AARCH64_OPND_SYSREG_IC:
3397 case AARCH64_OPND_SYSREG_TLBI:
3398 snprintf (buf, size, "%s", opnd->sysins_op->name);
3399 break;
3400
3401 case AARCH64_OPND_BARRIER:
3402 snprintf (buf, size, "%s", opnd->barrier->name);
3403 break;
3404
3405 case AARCH64_OPND_BARRIER_ISB:
3406 /* Operand can be omitted, e.g. in DCPS1. */
3407 if (! optional_operand_p (opcode, idx)
3408 || (opnd->barrier->value
3409 != get_optional_operand_default_value (opcode)))
3410 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3411 break;
3412
3413 case AARCH64_OPND_PRFOP:
3414 if (opnd->prfop->name != NULL)
3415 snprintf (buf, size, "%s", opnd->prfop->name);
3416 else
3417 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3418 break;
3419
3420 case AARCH64_OPND_BARRIER_PSB:
3421 snprintf (buf, size, "%s", opnd->hint_option->name);
3422 break;
3423
3424 default:
3425 assert (0);
3426 }
3427 }
3428 \f
/* Pack a system-register address (op0:op1:CRn:CRm:op2 as used by MRS/MSR
   and SYS) into a single integer: after the final >> 5, op2 sits in bits
   [2:0], CRm in [6:3], CRn in [10:7], op1 in [13:11] and op0 in [15:14].
   This is the inverse of the "s<op0>_<op1>_c<CRn>_c<CRm>_<op2>" decode
   used when printing implementation-defined system registers.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand for the CRn/CRm field values used in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Flag bits for the third field of an aarch64_sys_reg entry.  Each is
   undefined first in case an earlier header defined the same name.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
                       operand.  */
3468
3469
3470 /* TODO there are two more issues need to be resolved
3471 1. handle read-only and write-only system registers
3472 2. handle cpu-implementation-defined system registers. */
/* Table of named system registers.  Each entry holds the assembler name,
   the CPENC/CPEN_-packed encoding, and F_* flags (F_ARCHEXT entries are
   additionally gated by aarch64_sys_reg_supported_p).  "RO"/"r"/"w"
   comments record read-only/read/write-only registers; per the TODO
   above, the flags field does not yet encode this.  The table ends with
   a null-name sentinel.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
  { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1", CPEN_(0,C0,1), 0 },
  { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0", CPEN_(0,C1,0), 0 },
  { "spsel", CPEN_(0,C2,0), 0 },
  { "daif", CPEN_(3,C2,1), 0 },
  { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
  { "pan", CPEN_(0,C2,3), F_ARCHEXT },
  { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv", CPEN_(3,C2,0), 0 },
  { "fpcr", CPEN_(3,C4,0), 0 },
  { "fpsr", CPEN_(3,C4,1), 0 },
  { "dspsr_el0", CPEN_(3,C5,0), 0 },
  { "dlr_el0", CPEN_(3,C5,1), 0 },
  { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
  { "elr_el2", CPEN_(4,C0,1), 0 },
  { "sp_el1", CPEN_(4,C1,0), 0 },
  { "spsr_irq", CPEN_(4,C3,0), 0 },
  { "spsr_abt", CPEN_(4,C3,1), 0 },
  { "spsr_und", CPEN_(4,C3,2), 0 },
  { "spsr_fiq", CPEN_(4,C3,3), 0 },
  { "spsr_el3", CPEN_(6,C0,0), 0 },
  { "elr_el3", CPEN_(6,C0,1), 0 },
  { "sp_el2", CPEN_(6,C1,0), 0 },
  { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
  { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
  { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
  { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
  { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
  { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
  { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
  { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
  { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
  { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
  { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
  { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
  { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
  { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
  { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
  { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
  { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
  { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
  { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
  { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
  { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
  { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
  { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
  { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
  { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
  { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
  { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
  { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
  { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
  { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
  { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
  { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
  { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
  { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
  { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
  { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
  { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
  { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
  { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
  { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
  { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
  { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
  { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
  { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
  { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
  { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
  { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
  { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
  { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
  { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
  { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
  { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
  { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
  { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
  { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
  { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
  { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
  { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
  { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
  { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
  { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
  { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
  { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
  { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
  { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
  { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
  { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
  { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
  { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
  { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1", CPENC(3,0,C6,C0,0), 0 },
  { "far_el2", CPENC(3,4,C6,C0,0), 0 },
  { "far_el3", CPENC(3,6,C6,C0,0), 0 },
  { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
  { "par_el1", CPENC(3,0,C7,C4,0), 0 },
  { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
  { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
  { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
  { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
  { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
  { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
  { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
  { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
  { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
  { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
  { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
  { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
  { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
  { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
  { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
  { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
  { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
  { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
  { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
  { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
  { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
  { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
  { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
  { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
  { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
  { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
  { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
  { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
  { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
  { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
  { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
  { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
  { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
  { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
  { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
  { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
  { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
  { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
  { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
  { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
  { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
  { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
  { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
  { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
  { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
  { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
  { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
  { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
  { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
  /* End-of-table sentinel: callers iterate while .name is non-null.  */
  { 0, CPENC(0,0,0,0,0), 0 },
};
3837
3838 bfd_boolean
3839 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3840 {
3841 return (reg->flags & F_DEPRECATED) != 0;
3842 }
3843
/* Return TRUE if the system register REG is available with the CPU
   feature set FEATURES.  Registers without the F_ARCHEXT flag are part
   of the base architecture and are always supported; the checks below
   match, by encoding, the registers added by specific architecture
   extensions.  */

bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  return TRUE;
}
3950
/* PSTATE field names and their encoded values.  Entries flagged with
   F_ARCHEXT need an architecture-extension check; see
   aarch64_pstatefield_supported_p, which matches on the values below.
   A null-name entry terminates the table.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan",     0x04, F_ARCHEXT },	/* Requires the PAN extension.  */
  { "uao",     0x03, F_ARCHEXT },	/* Requires ARMv8.2.  */
  { 0, CPENC(0,0,0,0,0), 0 },
};
3960
3961 bfd_boolean
3962 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3963 const aarch64_sys_reg *reg)
3964 {
3965 if (!(reg->flags & F_ARCHEXT))
3966 return TRUE;
3967
3968 /* PAN. Values are from aarch64_pstatefields. */
3969 if (reg->value == 0x04
3970 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3971 return FALSE;
3972
3973 /* UAO. Values are from aarch64_pstatefields. */
3974 if (reg->value == 0x03
3975 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3976 return FALSE;
3977
3978 return TRUE;
3979 }
3980
/* IC (instruction-cache maintenance) operation names and encodings.
   Entries flagged with F_HASXT take a general-purpose register operand
   (see aarch64_sys_ins_reg_has_xt).  A null-name entry terminates the
   table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3988
/* DC (data-cache maintenance) operation names and encodings.  Entries
   flagged with F_HASXT take a general-purpose register operand;
   F_ARCHEXT entries additionally require a feature check (see
   aarch64_sys_ins_reg_supported_p).  A null-name entry terminates the
   table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",	CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",	CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",	CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",	CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",	CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",	CPENS (0, C7, C14, 2), F_HASXT },
    { 0,	CPENS(0,0,0,0), 0 }
};
4002
/* AT (address translation) operation names and encodings.  All entries
   take a general-purpose register operand (F_HASXT); F_ARCHEXT entries
   additionally require a feature check (see
   aarch64_sys_ins_reg_supported_p).  A null-name entry terminates the
   table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",	CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",	CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",	CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",	CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",	CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",	CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",	CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",	CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",	CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",	CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",	CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",	CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",	CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",	CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,	CPENS(0,0,0,0), 0 }
};
4021
/* TLBI (TLB invalidate) operation names and encodings.  Entries flagged
   with F_HASXT take a general-purpose register operand (see
   aarch64_sys_ins_reg_has_xt); the rest take no operand.  A null-name
   entry terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0,           CPENS(0,0,0,0), 0 }
};
4058
4059 bfd_boolean
4060 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4061 {
4062 return (sys_ins_reg->flags & F_HASXT) != 0;
4063 }
4064
4065 extern bfd_boolean
4066 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4067 const aarch64_sys_ins_reg *reg)
4068 {
4069 if (!(reg->flags & F_ARCHEXT))
4070 return TRUE;
4071
4072 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4073 if (reg->value == CPENS (3, C7, C12, 1)
4074 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4075 return FALSE;
4076
4077 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4078 if ((reg->value == CPENS (0, C7, C9, 0)
4079 || reg->value == CPENS (0, C7, C9, 1))
4080 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4081 return FALSE;
4082
4083 return TRUE;
4084 }
4085
4086 #undef C0
4087 #undef C1
4088 #undef C2
4089 #undef C3
4090 #undef C4
4091 #undef C5
4092 #undef C6
4093 #undef C7
4094 #undef C8
4095 #undef C9
4096 #undef C10
4097 #undef C11
4098 #undef C12
4099 #undef C13
4100 #undef C14
4101 #undef C15
4102
4103 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4104 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4105
4106 static bfd_boolean
4107 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4108 const aarch64_insn insn)
4109 {
4110 int t = BITS (insn, 4, 0);
4111 int n = BITS (insn, 9, 5);
4112 int t2 = BITS (insn, 14, 10);
4113
4114 if (BIT (insn, 23))
4115 {
4116 /* Write back enabled. */
4117 if ((t == n || t2 == n) && n != 31)
4118 return FALSE;
4119 }
4120
4121 if (BIT (insn, 22))
4122 {
4123 /* Load */
4124 if (t == t2)
4125 return FALSE;
4126 }
4127
4128 return TRUE;
4129 }
4130
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the low ESIZE bytes.  Shifting twice by
     ESIZE * 4 avoids undefined behavior when ESIZE == 8, where a single
     shift by ESIZE * 8 would equal the width of the type.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* Reject values that are neither a zero- nor a sign-extension of an
     ESIZE-byte value.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element size for which UVALUE is a
     replication of that element.  If it replicates all the way down to
     bytes, DUP can always handle it, so DUPM is not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* If the low byte is zero, fold out the byte shift and test the
     remaining value instead.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* DUPM is preferable only when the (possibly shifted) value does not
     fit in a signed 8-bit immediate.  */
  return svalue < -128 || svalue >= 128;
}
4157
4158 /* Include the opcode description table as well as the operand description
4159 table. */
4160 #define VERIFIER(x) verify_##x
4161 #include "aarch64-tbl.h"